diff --git "a/2167.jsonl" "b/2167.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2167.jsonl"
@@ -0,0 +1,1150 @@
+{"seq_id":"37300462155","text":"import os\r\nimport sys\r\n\r\n\r\ndef clear_terminal():\r\n \"\"\"\r\n \r\n Vide le terminal\r\n \r\n \"\"\"\r\n\r\n os.system('cls' if sys.platform == \"win32\" else 'clear')\r\n\r\ndef is_between(value, lower_bound, upper_bound):\r\n \"\"\"\r\n\r\n Vérifie si la valeur est entre deux valeurs\r\n\r\n Args :\r\n - value (int) : la valeur à vérifier\r\n - lower_bound (int) : la valeur de début\r\n - upper_bound (int) : la valeur de fin\r\n\r\n Return :\r\n - is_between (bool) : True si la valeur est entre les deux valeurs, False sinon\r\n\r\n \"\"\"\r\n\r\n is_between = False\r\n\r\n if not (value < lower_bound or value > upper_bound):\r\n is_between = True\r\n\r\n return is_between\r\n\r\ndef to_valid_positive_integer(prompt, lower_bound, upper_bound):\r\n \"\"\"\r\n\r\n Invite l'utilisateur à entrer une valeur entière positive dans les bornes spécifiées jusqu'à ce qu'une saisie valide soit fournie\r\n \r\n Args:\r\n - prompt (str): le message affiché à l'utilisateur pour demander une saisie\r\n - lower_bound (int): la borne inférieure pour la saisie valide (inclus)\r\n - upper_bound (int): la borne supérieure pour la saisie valide (inclus)\r\n \r\n Returns:\r\n - value (int): la saisie de l'utilisateur sous forme d'entier positif valide dans les bornes spécifiées\r\n\r\n \"\"\"\r\n\r\n is_good_input = False\r\n\r\n while not is_good_input:\r\n value = input(prompt)\r\n\r\n if not is_positive_integer(value):\r\n print(\"Ceci n'est pas un nombre entier positif...\")\r\n continue\r\n\r\n # Safe to cast to int\r\n value = int(value)\r\n\r\n if not is_between(value, lower_bound, upper_bound):\r\n print(f\"Choisissez une valeur entre {lower_bound} et {upper_bound}...\")\r\n continue\r\n \r\n is_good_input = True\r\n\r\n return value\r\n\r\ndef is_positive_integer(value):\r\n \"\"\"\r\n\r\n Vérifie si la valeur est un nombre positif\r\n\r\n \"\"\"\r\n\r\n is_positive_int = False\r\n\r\n if value.isdigit():\r\n is_positive_int = True\r\n\r\n return is_positive_int\r\n\r\ndef create_folder_if_not_exists(folder_path):\r\n \"\"\"\r\n\r\n Crée un dossier s'il n'existe pas de façon récursive\r\n\r\n Args :\r\n - folder_path (str) : le chemin du dossier à créer si il n'existe pas\r\n\r\n \"\"\"\r\n\r\n if not os.path.isdir(folder_path):\r\n os.makedirs(folder_path)\r\n\r\ndef try_default_file(base_filename, default_filename, error_message):\r\n \"\"\"\r\n \r\n Vérifie si le premier chemin existe, sinon, teste le deuxième chemin. 
Affiche le message d'erreur si aucun des deux chemins n'est trouvé\r\n\r\n Args :\r\n - base_filename (str) : le chemin de base\r\n - default_filename (str) : le chemin par défaut (fallback)\r\n - error_message (str) : le message d'erreur à afficher\r\n \r\n \"\"\"\r\n\r\n if not os.path.exists(base_filename):\r\n if not os.path.exists(default_filename):\r\n sys.exit(error_message)\r\n else:\r\n base_filename = default_filename\r\n\r\n return base_filename\r\n\r\ndef empty_apprentice_list(apprentices):\r\n \"\"\"\r\n \r\n Vérifie si la liste de apprentices est vide ou non\r\n\r\n Args :\r\n - apprentices (list[str]) : la liste de apprentices à vérifier\r\n \r\n \"\"\"\r\n\r\n is_empty = False\r\n\r\n if apprentices == None:\r\n print(\"================== LISTE DES APPRENTIS DÉBUT ==================\")\r\n print()\r\n print(\"Vous devez générer une liste d'apprentis (option 2) avant de lister les apprentis !\")\r\n print()\r\n input(\"Appuyer sur un bouton pour continuer : \")\r\n\r\n is_empty = True\r\n\r\n return is_empty\r\n\r\ndef get_type_map():\r\n \"\"\"\r\n \r\n Retourne un dictionnaire contenant les types de valeurs du fichier de configuration\r\n \r\n \"\"\"\r\n\r\n type_map = {\r\n \"int\": int,\r\n \"bool\": lambda x: x.lower() == \"true\",\r\n \"str\": str\r\n }\r\n\r\n return type_map\r\n\r\ndef get_max_combinations(first_names_path=\"./src/assets/csv_files/prenoms.csv\", last_names_path=\"./src/assets/csv_files/noms.csv\"):\r\n \"\"\"\r\n \r\n Obtient la valeur maximale des combinaisons possible entre les noms et prénoms\r\n\r\n Args :\r\n - first_names_path (str) : le chemin du fichier de prénoms\r\n - last_names_path (str) : le chemin du fichier de noms\r\n \r\n \"\"\"\r\n\r\n first_names_path = try_default_file(first_names_path, \"./app/assets/csv_files/prenoms.csv\", \"System error : prenoms.csv not found\")\r\n last_names_path = try_default_file(last_names_path, \"./app/assets/csv_files/noms.csv\", \"System error : noms.csv not found\")\r\n\r\n with open(first_names_path, mode='r') as first_name_file:\r\n first_names = first_name_file.readlines()\r\n\r\n with open(last_names_path, mode='r') as last_name_file:\r\n last_names = last_name_file.readlines()\r\n\r\n max_combination = len(first_names) * len(last_names)\r\n\r\n return max_combination","repo_name":"novaotp/apprentice_generator","sub_path":"app/modules/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6989595879","text":"import random\nimport sys\nimport pygame\n\n\n# 简单工厂 返回敌方坦克对象\nclass EnemyTankFactory:\n def __init__(self, total, big):\n # 敌方坦克总数量\n self.enemyTanks_total = total\n # 当前关卡重装坦克的数量\n self.enemyTanks_big = big\n # 坦克的等级数组\n self.list = []\n for _ in range(self.enemyTanks_big):\n self.list.append(2)\n for _ in range(self.enemyTanks_total - self.enemyTanks_big):\n self.list.append(random.randint(0, 1))\n random.shuffle(self.list)\n\n def factory(self, t):\n if t == 'tank':\n if len(self.list) > 0:\n return EnemyTank(self.list.pop())\n return EnemyTank(0)\n\n\n# 坦克类\nclass Tank(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.tank = None\n self.direction = None\n self.img = None\n self.image = None\n self.rect = None\n self.live = True\n self.stop = False\n self.speed = None\n self.flag = None\n self.bullet = None\n\n # 坦克移动\n def move(self, group, home):\n if not self.stop:\n rect = self.rect\n if self.direction == 
'U':\n self.img = self.tank.subsurface((0, 0), (48, 48))\n self.rect = self.rect.move(0, 0 - self.speed)\n elif self.direction == 'D':\n self.img = self.tank.subsurface((0, 48), (48, 48))\n self.rect = self.rect.move(0, self.speed)\n elif self.direction == 'L':\n self.img = self.tank.subsurface((0, 96), (48, 48))\n self.rect = self.rect.move(0 - self.speed, 0)\n elif self.direction == 'R':\n self.img = self.tank.subsurface((0, 144), (48, 48))\n self.rect = self.rect.move(self.speed, 0)\n # 碰撞检测\n if self.is_blocked(group, home):\n if self.flag:\n self.rect = rect\n self.stop = True\n else:\n self.rect = rect\n self.direction = random.choice(['U', 'D', 'L', 'R'])\n\n # 开火\n def fire(self):\n self.bullet.live = True\n self.bullet.reload(self.direction)\n if self.direction == 'U':\n self.bullet.rect.left = self.rect.left + 20\n self.bullet.rect.bottom = self.rect.top - 1\n elif self.direction == 'D':\n self.bullet.rect.left = self.rect.left + 20\n self.bullet.rect.top = self.rect.bottom + 1\n elif self.direction == 'L':\n self.bullet.rect.right = self.rect.left - 1\n self.bullet.rect.top = self.rect.top + 20\n elif self.direction == 'R':\n self.bullet.rect.left = self.rect.right + 1\n self.bullet.rect.top = self.rect.top + 20\n\n # 碰撞检测\n def is_blocked(self, group, home):\n if self.rect.left < 0 or self.rect.top < 0:\n return True\n elif self.rect.right > 624 or self.rect.bottom > 624:\n return True\n for each in group:\n if pygame.sprite.spritecollide(self, each, False, None):\n return True\n if pygame.sprite.collide_rect(self, home):\n return True\n return False\n\n\n# 己方坦克类\nclass MyTank(Tank):\n def __init__(self, x, y, speed):\n super().__init__()\n self.direction = 'U'\n self.tank = pygame.image.load('images/myTank/myTank.png')\n self.img = self.tank.subsurface((0, 0), (48, 48))\n self.rect = self.img.get_rect()\n self.rect.left, self.rect.top = x, y\n self.speed = speed\n self.stop = True\n self.flag = True\n self.life = 3\n self.bullet = Bullet(6, self.flag)\n\n\n# 敌方坦克类\nclass EnemyTank(Tank):\n def __init__(self, level, x=None, y=None):\n \"\"\"\n :param x:\n :param y:\n :param level: 0-2\n \"\"\"\n super().__init__()\n self.direction = 'D'\n self.tanks0 = ['./images/enemyTank/enemy_1_0.png', './images/enemyTank/enemy_1_1.png',\n './images/enemyTank/enemy_1_2.png']\n self.tanks1 = ['./images/enemyTank/enemy_2_0.png', './images/enemyTank/enemy_2_1.png',\n './images/enemyTank/enemy_2_2.png']\n self.tanks2 = ['./images/enemyTank/enemy_3_0.png', './images/enemyTank/enemy_3_1.png',\n './images/enemyTank/enemy_3_2.png']\n self.tanks = [self.tanks0, self.tanks1, self.tanks2]\n self.level = level\n self.color = random.randint(0, 2)\n self.tank = pygame.image.load(self.tanks[self.level][self.color])\n self.img = self.tank.subsurface((0, 0), (48, 48))\n self.rect = self.img.get_rect()\n if x is None or y is None:\n x = random.randint(0, 2)\n self.rect.left, self.rect.top = x * 12 * 24, 3\n else:\n self.rect.left, self.rect.top = x, y\n self.speed = 3 - self.level\n self.blood = self.level + 1\n self.stop = False\n self.flag = False\n self.bullet = Bullet(6, self.flag)\n\n\n# 子弹类\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, speed, flag):\n pygame.sprite.Sprite.__init__(self)\n\n self.bullets = {\n 'U': pygame.image.load('./images/bullet/bullet_up.png'),\n 'D': pygame.image.load('./images/bullet/bullet_down.png'),\n 'L': pygame.image.load('./images/bullet/bullet_left.png'),\n 'R': pygame.image.load('./images/bullet/bullet_right.png')\n }\n self.direction = 'U'\n self.speed = 
speed\n self.img = self.bullets[self.direction]\n # 在坦克类中赋实际值\n self.rect = self.img.get_rect()\n self.rect.left, self.rect.top = 0, 0\n self.live = False\n self.flag = flag\n\n # 子弹重载\n def reload(self, direction):\n self.direction = direction\n self.img = self.bullets[self.direction]\n self.live = True\n\n # 子弹移动\n def move(self):\n if self.direction == 'U':\n self.rect = self.rect.move(0, 0 - self.speed)\n elif self.direction == 'D':\n self.rect = self.rect.move(0, self.speed)\n elif self.direction == 'L':\n self.rect = self.rect.move(0 - self.speed, 0)\n elif self.direction == 'R':\n self.rect = self.rect.move(self.speed, 0)\n if (0 > self.rect.top) or (self.rect.bottom > 630) or (self.rect.left < 0) or (self.rect.right > 630):\n self.live = False\n\n\n# 砖墙类\nclass Brick(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.img = pygame.image.load('./images/scene/brick.png')\n self.rect = self.img.get_rect()\n self.rect.left, self.rect.top = x, y\n self.health = 5\n self.live = True\n\n\n# 铁墙类\nclass Iron(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.img = pygame.image.load('./images/scene/iron.png')\n self.rect = self.img.get_rect()\n self.rect.left, self.rect.top = x, y\n self.health = 10\n self.live = True\n\n\n# 大本营类\nclass Home(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.homes = ['./images/home/home1.png', './images/home/home2.png', './images/home/home_destroyed.png']\n self.img = pygame.image.load(self.homes[0])\n self.rect = self.img.get_rect()\n self.rect.left, self.rect.top = (3 + 12 * 24, 3 + 24 * 24)\n self.alive = True\n\n\n# 地图类\nclass Map:\n def __init__(self, total):\n # 精灵组\n self.brickGroup = pygame.sprite.Group()\n self.ironGroup = pygame.sprite.Group()\n self.tanksGroup = pygame.sprite.Group()\n # 坦克精灵组\n self.myTanksGroup = pygame.sprite.Group()\n self.enemyTanksGroup = pygame.sprite.Group()\n # 子弹精灵组\n self.bulletsGroup = pygame.sprite.Group()\n self.enemyTanks_total = total\n # 场上存在的敌方坦克总数量\n self.enemyTanks_now = 0\n # 场上可以存在的敌方坦克总数量\n self.enemyTanks_now_max = 4\n # 大本营\n self.home = Home()\n self.stage()\n self.brick = None\n self.iron = None\n self.game_over = False\n\n # 地图初始化\n def stage(self):\n for x in [2, 3, 6, 7, 18, 19, 22, 23]:\n for y in [2, 3, 4, 5, 6, 7, 8, 9, 10, 17, 18, 19, 20, 21, 22, 23]:\n self.brick = Brick(x * 24, y * 24)\n self.brickGroup.add(self.brick)\n for x in [10, 11, 14, 15]:\n for y in [2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 17, 18, 19, 20]:\n self.brick = Brick(x * 24, y * 24)\n self.brickGroup.add(self.brick)\n for x in [4, 5, 6, 7, 18, 19, 20, 21]:\n for y in [13, 14]:\n self.brick = Brick(x * 24, y * 24)\n self.brickGroup.add(self.brick)\n for x in [12, 13]:\n for y in [16, 17]:\n self.brick = Brick(x * 24, y * 24)\n self.brickGroup.add(self.brick)\n for x, y in [(11, 23), (12, 23), (13, 23), (14, 23), (11, 24), (14, 24), (11, 25), (14, 25)]:\n self.brick = Brick(x * 24, y * 24)\n self.brickGroup.add(self.brick)\n for x, y in [(0, 14), (1, 14), (12, 6), (13, 6), (12, 7), (13, 7), (24, 14), (25, 14)]:\n self.iron = Iron(x * 24, y * 24)\n self.ironGroup.add(self.iron)\n\n # 产生一定数量的敌方坦克\n def init_enemy_tank(self, factory):\n for x in range(0, 3):\n if self.enemyTanks_total > 0:\n enemy_tank = factory.factory('tank')\n if not pygame.sprite.spritecollide(enemy_tank, self.tanksGroup, False, None):\n self.tanksGroup.add(enemy_tank)\n self.enemyTanksGroup.add(enemy_tank)\n self.enemyTanks_now += 1\n 
self.enemyTanks_total -= 1\n\n # 当地图上的敌方坦克低于最大值时,新增坦克\n def create_new_enemy_tank(self, factory):\n if self.enemyTanks_total > 0:\n if self.enemyTanks_now < self.enemyTanks_now_max:\n enemy_tank = factory.factory('tank')\n if not pygame.sprite.spritecollide(enemy_tank, self.tanksGroup, False, None):\n self.tanksGroup.add(enemy_tank)\n self.enemyTanksGroup.add(enemy_tank)\n self.enemyTanks_now += 1\n self.enemyTanks_total -= 1\n\n # 显示地图场景\n def status_display(self, screen):\n # 砖墙\n for each in self.brickGroup:\n screen.blit(each.img, each.rect)\n # 铁墙\n for each in self.ironGroup:\n screen.blit(each.img, each.rect)\n # 家\n screen.blit(self.home.img, self.home.rect)\n\n # 显示坦克\n def tank_bullet_display(self, screen):\n # 敌方坦克\n for each in self.enemyTanksGroup:\n screen.blit(each.img, each.rect)\n if not each.stop:\n self.tanksGroup.remove(each)\n each.move([self.tanksGroup, self.brickGroup, self.ironGroup], self.home)\n self.tanksGroup.add(each)\n # 敌方坦克发射子弹\n for each in self.enemyTanksGroup:\n if each.live:\n if not each.stop and not each.bullet.live:\n self.bulletsGroup.remove(each.bullet)\n each.fire()\n self.bulletsGroup.add(each.bullet)\n # 子弹\n for tank in self.tanksGroup:\n if tank.bullet.live:\n tank.bullet.move()\n screen.blit(tank.bullet.img, tank.bullet.rect)\n # 子弹碰撞子弹\n self.bulletsGroup.remove(tank.bullet)\n for each in self.bulletsGroup:\n if each.live:\n if pygame.sprite.collide_rect(tank.bullet, each) and tank.bullet.flag != each.flag:\n tank.bullet.live = False\n each.live = False\n self.bulletsGroup.remove(each)\n break\n else:\n self.bulletsGroup.remove(each)\n # 子弹碰撞坦克\n for each in self.tanksGroup:\n if each.live:\n if tank.bullet.flag != each.flag and pygame.sprite.collide_rect(tank.bullet, each):\n # 己方坦克收到伤害\n if each.flag:\n Music.bang_sound.play()\n each.life -= 1\n if each.life < 0:\n self.myTanksGroup.remove(each)\n self.tanksGroup.remove(each)\n if len(self.myTanksGroup) < 1:\n self.game_over = True\n else:\n pass\n # each.reset()\n tank.bullet.live = False\n self.bulletsGroup.remove(tank.bullet)\n # 敌方坦克收到伤害\n else:\n each.blood -= 1\n # each.level -= 1\n if each.blood < 0:\n Music.bang_sound.play()\n each.being = False\n self.enemyTanksGroup.remove(each)\n self.enemyTanks_now -= 1\n self.tanksGroup.remove(each)\n tank.bullet.live = False\n break\n else:\n self.tanksGroup.remove(each)\n self.myTanksGroup.remove(each)\n self.enemyTanksGroup.remove(each)\n # 子弹碰撞砖墙\n if pygame.sprite.spritecollide(tank.bullet, self.brickGroup, True, None):\n tank.bullet.live = False\n\n # 子弹碰撞铁墙\n if pygame.sprite.spritecollide(tank.bullet, self.ironGroup, False, None):\n tank.bullet.live = False\n\n # 子弹碰大本营\n if pygame.sprite.collide_rect(tank.bullet, self.home):\n tank.bullet.live = False\n self.game_over = True\n\n\n# 开始界面显示\ndef show_start_interface(screen, width, height):\n t_font = pygame.font.Font('./font/simkai.ttf', width//6)\n c_font = pygame.font.Font('./font/simkai.ttf', width//20)\n title = t_font.render(u'坦克大战', True, (255, 0, 0))\n content1 = c_font.render(u'按1键进入单人游戏', True, (0, 0, 255))\n content2 = c_font.render(u'按2键进入双人人游戏', True, (0, 0, 255))\n content3 = c_font.render(u'按3键静音进入游戏', True, (0, 0, 255))\n content4 = c_font.render(u'按4键静音进入地狱模式', True, (0, 0, 255))\n t_rect = title.get_rect()\n t_rect.midtop = (width/2, height/4)\n crect1 = content1.get_rect()\n crect1.midtop = (width/2, height/1.8)\n crect2 = content2.get_rect()\n crect2.midtop = (width/2, height/1.6)\n crect3 = content3.get_rect()\n crect3.midtop = (width / 2, height / 1.45)\n crect4 = 
content4.get_rect()\n crect4.midtop = (width / 2, height / 1.34)\n screen.blit(title, t_rect)\n screen.blit(content1, crect1)\n screen.blit(content2, crect2)\n screen.blit(content3, crect3)\n screen.blit(content4, crect4)\n pygame.display.update()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_1:\n return 1\n if event.key == pygame.K_2:\n return 2\n if event.key == pygame.K_3:\n return 3\n if event.key == pygame.K_4:\n return 4\n\n\ndef show_end_interface(screen, width, height, is_win):\n screen.fill((0, 0, 0))\n if is_win:\n font = pygame.font.Font('./font/simkai.ttf', width//10)\n content = font.render(u'恭喜通关!', True, (255, 0, 0))\n rect = content.get_rect()\n rect.midtop = (width/2, height/2)\n screen.blit(content, rect)\n else:\n fail_img = pygame.image.load(\"./images/others/gameover.png\")\n rect = fail_img.get_rect()\n rect.midtop = (width/2, height/2)\n screen.blit(fail_img, rect)\n pygame.display.update()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n\n# 关卡切换\ndef show_switch_stage(screen, width, height, stage):\n screen.fill((0, 0, 0))\n font = pygame.font.Font('./font/simkai.ttf', width//10)\n content = font.render(u'第%d关' % stage, True, (0, 255, 0))\n rect = content.get_rect()\n rect.midtop = (width/2, height/2)\n screen.blit(content, rect)\n pygame.display.update()\n delay_event = pygame.constants.USEREVENT\n pygame.time.set_timer(delay_event, 1000)\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == delay_event:\n return\n\n\npygame.init()\n\n\nclass Music:\n # 加载音效\n bang_sound = pygame.mixer.Sound(\"./audios/bang.wav\")\n bang_sound.set_volume(1)\n fire_sound = pygame.mixer.Sound(\"./audios/fire.wav\")\n fire_sound.set_volume(1)\n start_sound = pygame.mixer.Sound(\"./audios/start.wav\")\n start_sound.set_volume(1)\n\n\ndef main_game():\n pygame.init()\n screen = pygame.display.set_mode((630, 630))\n pygame.display.set_caption(\"坦克大战\")\n clock = pygame.time.Clock()\n screen.fill((0, 0, 0))\n # 开始界面\n num_player = show_start_interface(screen, 630, 630)\n # 播放游戏开始的音乐\n Music.start_sound.play()\n # 关卡\n stage = 0\n num_enemyTank = 8\n num_bigTank = 4\n # 游戏主循环\n game_over = False\n while not game_over:\n # 关卡\n stage += 1\n num_bigTank += 1\n num_enemyTank += 1\n show_switch_stage(screen, 630, 630, stage)\n # 初始化地图类\n map_ = Map(num_enemyTank)\n map_.stage()\n factory = EnemyTankFactory(num_enemyTank, num_bigTank)\n # 生成己方坦克\n myTank = MyTank(24 * 8, 24 * 24, 3)\n map_.tanksGroup.add(myTank)\n map_.myTanksGroup.add(myTank)\n map_.init_enemy_tank(factory)\n # 定义生成敌方坦克事件\n genEnemyEvent = pygame.constants.USEREVENT\n pygame.time.set_timer(genEnemyEvent, 100)\n while True:\n if game_over:\n break\n if map_.enemyTanks_total < 1 and map_.enemyTanks_now < 1:\n break\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == genEnemyEvent:\n map_.create_new_enemy_tank(factory)\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n myTank.direction = 'L'\n myTank.stop = False\n elif event.key == pygame.K_RIGHT:\n myTank.direction = 'R'\n myTank.stop = False\n elif event.key == pygame.K_UP:\n myTank.direction = 'U'\n myTank.stop = False\n elif event.key == pygame.K_DOWN:\n myTank.direction = 'D'\n myTank.stop = False\n elif event.key == pygame.K_SPACE:\n if not myTank.bullet.live:\n 
myTank.fire()\n Music.fire_sound.play()\n\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT or event.key == pygame.K_DOWN or event.key == pygame.K_UP:\n myTank.stop = True\n\n # 背景\n screen.fill((0, 0, 0))\n map_.status_display(screen)\n # 我方坦克\n if myTank in map_.myTanksGroup:\n if not myTank.stop:\n map_.tanksGroup.remove(myTank)\n myTank.move([map_.tanksGroup, map_.brickGroup, map_.ironGroup], map_.home)\n map_.tanksGroup.add(myTank)\n screen.blit(myTank.img, myTank.rect)\n map_.tank_bullet_display(screen)\n game_over = map_.game_over\n pygame.display.flip()\n clock.tick(60)\n # 结束界面\n show_end_interface(screen, 630, 630, False)\n\n\nif __name__ == \"__main__\":\n main_game()\n","repo_name":"shenaky/TankBattle","sub_path":"tank.py","file_name":"tank.py","file_ext":"py","file_size_in_byte":21326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41433488365","text":"import unittest\nimport SimpleDocumentStore\n\n\nclass Tests(unittest.TestCase):\n bck_name = \"bucket_name\"\n local_path = \"local_path\"\n aws_access_key = \"aws_access_key\"\n aws_secret_key = \"aws_secret_key\"\n\n afsm = SimpleDocumentStore.create_storage_mode(local_path,\n 'aws',\n aws_access_key,\n aws_secret_key\n )\n fsm = SimpleDocumentStore.create_storage_mode(local_path)\n\n def test_save_doc(self):\n Tests.afsm.save_document(\"test1.pdf\", \"test1\", bucket_name=Tests.bck_name)\n Tests.fsm.save_document(\"test1.pdf\", \"test1\")\n\n def test_open_doc(self):\n f = Tests.afsm.open_document(\"test1.pdf\", 'r', bucket_name=Tests.bck_name)\n f.read()\n f = Tests.fsm.open_document(\"test1.pdf\", 'r')\n f.read()\n\n def test_rename_doc(self):\n Tests.afsm.rename_document(\"test1\", \"test11\", bucket_name=Tests.bck_name)\n Tests.fsm.rename_document(\"test1\", \"test11\")\n\n def test_get_size(self):\n s3s = Tests.afsm.get_size(\"test11\", bucket_name=Tests.bck_name)\n s = Tests.fsm.get_size(\"test11\")\n\n def test_get_creation_time(self):\n s3s = Tests.afsm.get_creation_time(\"test11\", bucket_name=Tests.bck_name)\n s = Tests.fsm.get_creation_time(\"test11\")\n a = s\n\n def test_get_modified_time(self):\n s3s = Tests.afsm.get_updated_time(\"test11\", bucket_name=Tests.bck_name)\n s = Tests.fsm.get_updated_time(\"test11\")\n a = s","repo_name":"reetikaSR/SimpleDocumentStore","sub_path":"Tests.py","file_name":"Tests.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"73227728906","text":"\"\"\"\nA strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).\n\nWrite a function to determine if a number is strobogrammatic. 
The number is represented as a string.\n\nExample 1:\n\nInput: \"69\"\nOutput: true\nExample 2:\n\nInput: \"88\"\nOutput: true\nExample 3:\n\nInput: \"962\"\nOutput: false\n\n\"\"\"\n\n\"\"\"\n1 6 8 9 0\n10 -> no\n11 -> yes\n69 -> yes\n\n\"\"\"\n# Time Complexity: O(n) where n is the length of the string\n# Space Complexity: O(n)\n\n\nclass Solution(object):\n def isStrobogrammatic(self, num):\n \"\"\"\n :type num: str\n :rtype: bool\n \"\"\"\n non_strobo_set = {\"2\",\"3\",\"4\",\"5\",\"7\"} \n rev_num = num[::-1]\n \n for i in range(0,len(num)):\n if num[i] in non_strobo_set:\n return False\n if num[i] == '6' and rev_num[i] != '9':\n return False\n elif num[i] == '9' and rev_num[i] != '6':\n return False\n \n elif num[i] != '9' and num[i] != '6':\n if num[i] != rev_num[i]:\n return False\n \n \n return True\n \n \n \n \n \n ","repo_name":"christian-miljkovic/interview","sub_path":"Leetcode/Algorithms/Easy/Strings/IsStringUpsideDown.py","file_name":"IsStringUpsideDown.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4563415109","text":"import click\nimport boto3\n\ndef add(x,y):\n return x+y\n\n@click.command()\ndef buckets():\n \"\"\"This lists my AWS S3 buckets\"\"\"\n\n s3 = boto3.client(\"s3\")\n all_buckets = s3.list_buckets()\n for bucket in all_buckets['Buckets']:\n click.echo(\n click.style(f\"bucket: {bucket['Name']}\", bg=\"yellow\", fg=\"blue\")\n )\n\n\nif __name__ == \"__main__\":\n buckets()","repo_name":"noahgift/devops-from-zero","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23725011408","text":"try:\n from PIL import Image, ImageDraw\n\nexcept ImportError:\n import Image\n import ImageDraw\n\nimport logging\nimport math\nimport numpy as np\n\nfrom spatial import gdal_import, ogr_import\nfrom spatial import gdal_reader as gdalr\nfrom spatial.gdal_reader import GdalReader\nfrom toolbox.vector import ogr_utils\n\nfrom spatial import ogr_reader as ogrr\n\ntry:\n from osgeo import gdal, ogr, gdal_array\n from osgeo import osr\n from osgeo import ogr\n\nexcept ImportError:\n import gdal\n import osr\n import ogr\n\n# from gdal import *\n# from gdalconst import *\n\n\ndef single_bands_to_multiband(gdal_bands_list, output=None):\n \"\"\" Convert several gdal single file to a single gdal datasource\"\"\"\n\n # Get mask\n # src_ds = gdal.OpenShared(gdal_bands_list[0])\n src_ds = GdalReader().gdal2ds(gdal_bands_list[0])\n # src_ds.SetMetadataItem('FilePath', gdal_bands_list[0])\n\n # Get output\n if not output:\n output = 'image_multiband'\n\n # Create new ds\n # tmp_ds = gdal.GetDriverByName('MEM').CreateCopy('', src_ds, 0)\n out_ds = gdalr.GdalReader().create_ds(output, src_ds.RasterXSize, src_ds.RasterYSize, len(gdal_bands_list),\n src_ds.GetRasterBand(1).DataType)\n\n out_ds.SetProjection(src_ds.GetProjection())\n out_ds.SetGeoTransform(src_ds.GetGeoTransform())\n\n i = 0\n for band_dataset in gdal_bands_list:\n\n i = i + 1\n band_ds = GdalReader().gdal2ds(band_dataset)\n # mask_ds = gdal.OpenShared(band_dataset)\n array_i = band_ds.GetRasterBand(1).ReadAsArray()\n\n out_ds.GetRasterBand(i).WriteArray(array_i)\n band_ds = None\n del array_i\n\n if GdalReader().isvalid(out_ds):\n return out_ds\n\n # file_path = out_ds.GetMetadataItem('FilePath')\n # if driver.ShortName == 'MEM':\n # return out_ds\n #\n # elif 
os.path.exists(file_path):\n # return file_path\n #\n # else:\n # logging.warning(\"Failed to create GDAL file. \" + str(file_path))\n # return\n\n\ndef merge(src_ds_list, outname, smooth_edges=False):\n # First layer metadata\n src_ds = GdalReader().gdal2ds(src_ds_list[0])\n src_ds_list = [GdalReader().gdal2ds(r) for r in src_ds_list]\n geotransform = src_ds.GetGeoTransform()\n xres = geotransform[1]\n yres = geotransform[5]\n projection = src_ds.GetProjection()\n nodata = src_ds.GetRasterBand(1).GetNoDataValue()\n\n # Get common extent\n xmin, xmax, ymin, ymax = GdalReader().get_extent(src_ds)\n\n bands = src_ds.RasterCount\n\n for src_i in src_ds_list:\n\n xmin_i, xmax_i, ymin_i, ymax_i = GdalReader().get_extent(src_i)\n xmax = max(xmax, xmax_i)\n xmin = min(xmin, xmin_i)\n ymax = max(ymax, ymax_i)\n ymin = min(ymin, ymin_i)\n bands = max(bands, src_i.RasterCount)\n\n # Aligne Pixels\n xmin = math.floor(xmin / xres) * xres\n xmax = math.ceil(xmax / xres) * xres\n ymin = math.floor(ymin / -yres) * -yres\n ymax = math.ceil(ymax / -yres) * -yres\n\n # Create output if it does not already exist.\n geotransform = [xmin, xres, 0, ymax, 0, yres]\n xsize = int(math.ceil((xmax - xmin) / xres))\n ysize = int(math.ceil((ymin - ymax) / yres))\n\n # Copy data from source files into output file 1.\n out_array = np.empty((ysize, xsize))\n out_array[:] = np.nan\n\n borders = []\n for src_i in src_ds_list:\n for band in xrange(1, src_ds.RasterCount + 1):\n\n geotransform_i = src_i.GetGeoTransform()\n if not int(xres) == int(geotransform_i[1]) or not int(yres) == int(geotransform_i[5]):\n logging.error('Merge cannot be performed because the layer resolution are different: ' +\n str(xres) + ',' + str(yres) + ' vs. ' + str(geotransform_i[1]) + ','\n + str(geotransform_i[5]))\n\n continue\n\n xmin_i, xmax_i, ymin_i, ymax_i = GdalReader().get_extent(src_i)\n xoff = int(math.ceil((xmin_i - xmin) / xres))\n yoff = int(math.ceil((ymax_i - ymax) / yres))\n\n x_size_i = src_i.RasterXSize\n y_size_i = src_i.RasterYSize\n array_i = GdalReader().ds2array(src_i)\n\n out_array[yoff:yoff + y_size_i, xoff:xoff + x_size_i] = array_i\n\n #slice_i = out_array[yoff:yoff + y_size_i, xoff:xoff + x_size_i]\n #out_array[yoff:yoff + y_size_i, xoff:xoff + x_size_i] = np.where(\n # np.ma.getmask(np.ma.masked_invalid(array_i[band - 1])), slice_i, array_i[band - 1])\n\n # Edges smoothing\n if smooth_edges:\n mask = np.where(np.ma.getmask(np.ma.masked_invalid(array_i[band - 1])), np.nan, 1)\n\n if smooth_edges:\n borders_i = [([i + yoff, j + xoff]\n if 0 < [mask[i - 1, j], mask[i + 1, j], mask[i, j - 1], mask[i, j + 1]].count(1) < 4\n else None)\n if (1 <= i < mask.shape[0] - 1) and (1 <= j < mask.shape[1] - 1) else None\n # ([i+yoff, j+xoff] if mask[i, j] == 1 else None)\n for i, j in np.ndindex(mask.shape)]\n borders = borders + borders_i\n\n # Edges smoothing\n if smooth_edges:\n for k in borders:\n if k:\n out_array[k[0], k[1]] = np.nanmean(out_array[k[0] - 1:k[0] + 1, k[1] - 1:k[1] + 1])\n\n return GdalReader().array2ds(src_array=out_array, output=outname, projection=projection, geotransform=geotransform,\n nodata=nodata)\n\n\ndef poly_clip(raster, polygons, outuput):\n \"\"\"Clip raster with polygons\"\"\"\n\n src_ds = gdal_import.src2ds(raster)\n poly_ds = ogr_import.src2ogr(polygons)\n\n # 1.- Reproject vector geometry to same projection as raster\n projection = src_ds.GetProjection()\n poly_reprojected = ogr_utils.reproject(poly_ds, wtk_projection=projection, outname='polygons_reprojected')\n\n poly_ds = 
ogr_import.src2ogr(poly_reprojected)\n poly_lyr = poly_ds.GetLayer()\n\n # Bound box (debbuging code)\n # geom_type = poly_lyr.GetGeomType()\n # outDataSource = ogrr.create_layer('bound_box', geom_type=geom_type, wkt_proj=projection, file_path=None)\n # outLayer = outDataSource.GetLfpayer()\n # outLayerDefn = outLayer.GetLayerDefn()\n # outFeature = ogr.Feature(outLayerDefn)\n # outFeature.SetGeometry(geom)\n # outLayer.CreateFeature(outFeature)\n # outFeature = None\n # outDataSource = None\n\n # 2.- Filter and extract features\n # Get Raster Extent\n nodata = src_ds.GetRasterBand(1).GetNoDataValue()\n\n r_min_x, r_max_x, r_min_y, r_max_y = GdalReader().get_extent(src_ds)\n\n wkt = 'POLYGON((' + ','.join([' '.join([str(r_min_x), str(r_max_y)]), ' '.join([str(r_min_x), str(r_min_y)]),\n ' '.join([str(r_max_x), str(r_min_y)]), ' '.join([str(r_max_x), str(r_max_y)]),\n ' '.join([str(r_min_x), str(r_max_y)])]) + '))'\n\n geom = ogr.CreateGeometryFromWkt(wkt)\n\n poly_lyr.SetSpatialFilter(geom)\n mem_driver = ogr.GetDriverByName('MEMORY')\n filtered_poly_ds = mem_driver.CreateDataSource('filered_polygons')\n\n # Open the memory datasource with write access and copy content\n mem_driver = ogr.GetDriverByName('MEMORY')\n mem_driver.Open('filered_polygons', 1)\n filtered_poly_ds.CopyLayer(poly_lyr, 'filered_polygons', ['OVERWRITE=YES'])\n\n poly_lyr.SetSpatialFilter(None)\n\n # Intersect geometries with boundary box\n geom_type = poly_lyr.GetGeomType()\n clipped_poly_ds = ogrr.create_layer('clipped_polygons', geom_type=geom_type, wkt_proj=projection, file_path=None)\n\n clipped_poly_lyr = clipped_poly_ds.GetLayer()\n filtered_poly_lyr = filtered_poly_ds.GetLayer()\n\n clipped_lyr_defn = clipped_poly_lyr.GetLayerDefn()\n infeature = filtered_poly_lyr.GetNextFeature()\n while infeature:\n feat_geom = infeature.GetGeometryRef()\n intersection_geom = feat_geom.Intersection(geom)\n\n out_feature = ogr.Feature(clipped_lyr_defn)\n out_feature.SetGeometry(intersection_geom)\n clipped_poly_lyr.CreateFeature(out_feature)\n\n out_feature = None\n infeature = filtered_poly_lyr.GetNextFeature()\n\n filtered_poly_lyr.ResetReading()\n filtered_poly_lyr = None\n\n # Bound box (debbuging code)\n # geom_type = poly_lyr.GetGeomType()\n # filtered_poly_ds = ogrr.create_layer('filered_polygons', geom_type=geom_type, wkt_proj=projection,\n # file_path=None)\n\n # Clip raster to layer extent\n lyr = clipped_poly_lyr\n extent = lyr.GetExtent()\n\n # Convert the _vector extent to image pixel coordinates\n geo_trans = src_ds.GetGeoTransform()\n # projection = rds.GetProjection()\n ul_x, ul_y = GdalReader().world2pixel(geo_trans, extent[0], extent[3])\n lr_x, lr_y = GdalReader().world2pixel(geo_trans, extent[1], extent[2])\n\n # Create a new geomatrix for the _raster\n geo_trans = list(geo_trans)\n geo_trans[0] = extent[0]\n geo_trans[3] = extent[3]\n\n # Get the new array to layer extent\n rarray = gdal_array.DatasetReadAsArray(src_ds)\n\n if len(rarray.shape) == 3:\n clip = rarray[:, ul_y:lr_y, ul_x:lr_x]\n\n elif len(rarray.shape) == 2:\n clip = rarray[ul_y:lr_y, ul_x:lr_x]\n\n else:\n return logging.error('Error in array shape.')\n\n new_array = clip_raster_array(vds=filtered_poly_ds, raster_array=clip, geotransform=geo_trans, nodata=nodata)\n\n return GdalReader().array2ds(src_array=np.array(new_array), output=outuput, geotransform=geo_trans,\n projection=projection, nodata=nodata)\n\n\ndef clip_raster_array(vds, raster_array, geotransform, nodata=0):\n\n if len(raster_array.shape) == 3:\n\n nbands = 
raster_array.shape[0]\n\n else:\n nbands = 1\n\n new_array = []\n layer = vds.GetLayer(0)\n layer.ResetReading()\n\n for band in xrange(nbands):\n\n if len(raster_array.shape) == 3:\n raster_array_i = raster_array[band]\n\n else:\n raster_array_i = raster_array\n\n ysize, xsize = raster_array_i.shape\n\n # Create data mask\n rasterpoly = Image.new(\"L\", (xsize, ysize), 1)\n raster_im = ImageDraw.Draw(rasterpoly)\n inner_ring_im = ImageDraw.Draw(rasterpoly)\n\n # for fid in xrange(layer.GetFeatureCount()):\n # feature = layer.GetFeature(fid)\n for feature in layer:\n geoms = feature.GetGeometryRef()\n\n if geoms.GetGeometryName().lower() == \"multipolygon\":\n for geom in geoms:\n\n pts = geom.GetGeometryRef(0)\n points = [(pts.GetX(p), pts.GetY(p)) for p in range(pts.GetPointCount())]\n pixels = [GdalReader().world2pixel(geotransform, p[0], p[1]) for p in points]\n raster_im.polygon(pixels, 0)\n\n if geom.GetGeometryCount() > 1:\n for i in xrange(1, geom.GetGeometryCount()):\n pts = geom.GetGeometryRef(i)\n points1 = [(pts.GetX(p), pts.GetY(p)) for p in range(pts.GetPointCount())]\n pixels1 = [GdalReader().world2pixel(geotransform, p[0], p[1]) for p in points1]\n\n inner_ring_im.polygon(pixels1, 1)\n\n elif geoms.GetGeometryName().lower() == \"polygon\":\n pts = geoms.GetGeometryRef(0)\n points = [(pts.GetX(p), pts.GetY(p)) for p in range(pts.GetPointCount())]\n pixels = [GdalReader().world2pixel(geotransform, p[0], p[1]) for p in points]\n\n raster_im.polygon(pixels, 0)\n\n del feature\n\n layer.ResetReading()\n\n # Image to array\n try:\n # old version\n mask = np.fromstring(rasterpoly.tostring(), 'b')\n\n except:\n # new version\n mask = np.fromstring(rasterpoly.tobytes(), 'b')\n\n mask.shape = rasterpoly.im.size[1], rasterpoly.im.size[0]\n\n # Clip the image using the mask (Note that np.uint8 does not allow nan values\n new_array.append(np.choose(mask, (raster_array_i, nodata)).astype(np.float))\n\n return np.array(new_array)\n","repo_name":"TreeMetrics/ForestChange","sub_path":"forest_change/toolbox/raster/gdal_utils.py","file_name":"gdal_utils.py","file_ext":"py","file_size_in_byte":12314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8069477548","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nimport cv2\nimport os\nfrom os.path import join\nfrom tqdm import tqdm\nfrom sklearn.cluster import KMeans\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport json\n\n\n# Load data\ndef read_data(path):\n n_class = 30\n data = []\n class_table = {}\n for i, class_ in enumerate(os.listdir(path)):\n class_table[i] = class_\n class_ = join(path, class_)\n imgs = [cv2.imread(join(class_, file)) for file in os.listdir(class_) if file[0] != '.']\n data.append(imgs)\n return data, class_table\n\ndata, class_table = read_data('./database')\n\n\n# Evaluate\ndef cosine_similarity(a, b):\n return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))\n\ndef get_AP(similarity, cur_class):\n ## calculate AP\n total, correct = 0, 0\n AP = 0.0\n for j in range(len(similarity)):\n total += 1\n if similarity[j][1] == cur_class:\n correct += 1\n AP += correct / total\n AP /= correct\n return AP\n \ndef evaluate(feature):\n n = len(feature)\n cur_class = 0\n AP_class = []\n tmp_AP, count = 0.0, 0\n \n for i in range(n):\n similarity = []\n for j in range(n):\n if j == i:\n continue\n sim = cosine_similarity(feature[i][0], feature[j][0])\n similarity.append([sim, 
feature[j][1]])\n similarity = sorted(similarity, key=lambda x: x[0], reverse=True)\n \n ap = get_AP(similarity, feature[i][1])\n\n if feature[i][1] != cur_class:\n AP = tmp_AP / count\n AP_class.append([AP, cur_class])\n # init class & AP\n cur_class = feature[i][1]\n tmp_AP = ap\n count = 1\n else:\n tmp_AP += ap\n count += 1\n \n ## output result\n MAP = 0.0\n AP_class = sorted(AP_class, key=lambda x:x[0], reverse=True)\n for i in range(len(AP_class)):\n print('Class: {a}, AP: {b}'.format(a = class_table[AP_class[i][1]], b = AP_class[i][0]))\n MAP += AP_class[i][0]\n MAP /= len(AP_class)\n print('MAP: {a}'.format(a = MAP))\n\n\n# Color feature\ndef color_histogram(data):\n feature = []\n for i in tqdm(range(len(data))):\n for j in range(len(data[i])):\n hist = []\n for channel in range(3):\n hist.extend(cv2.calcHist([data[i][j]], [channel], None, [16], [0, 256]))\n hist = np.array(hist)\n hist = hist.flatten()\n feature.append([hist, i])\n # normalize\n hist_features = np.array([f[0] for f in feature])\n for i in range(len(hist_features[0])):\n mean = np.mean(hist_features[:, i], axis=0)\n std = np.std(hist_features[:, i], axis=0)\n hist_features[:, i] = (hist_features[:, i] - mean) / std\n for i in range(len(feature)):\n feature[i][0] = hist_features[i]\n return feature\n\n## get features and evaluate\ncolorFeature = color_histogram(data)\nprint('[Color feature]')\nevaluate(colorFeature)\n\n## plot feature histogram - take 30th for example\nplt.bar(np.arange(len(colorFeature[30][0])), colorFeature[30][0])\nplt.title('example feature')\nplt.show()\n\n\n# Texture feature\ndef build_filters():\n filters = []\n ksize = [7, 9, 11, 13, 15] # gabor尺度 - 5個\n lamda = np.pi/2.0 # 波長\n for theta in np.arange(0, np.pi, np.pi / 4): # gabor方向 - 4個\n for K in ksize:\n kernel = cv2.getGaborKernel((K, K), 1.0, theta, lamda, 0.5, 0, ktype=cv2.CV_32F)\n kernel /= 1.5 * kernel.sum()\n filters.append(kernel)\n return filters\n\ndef process(img, filters):\n accum = np.zeros_like(img)\n for kernel in filters:\n filtered = cv2.filter2D(img, cv2.CV_8UC3, kernel)\n np.maximum(accum, filtered, accum)\n return accum\n\n### Gabor feature extraction\ndef getGabor(img, filters):\n feature = []\n for i in range(len(filters)):\n tmp = process(img, filters[i])\n mean, std = np.mean(tmp), np.std(tmp)\n feature.append(mean)\n feature.append(std)\n feature = np.array(feature)\n return feature\n\ndef gabor_filter(data, filters):\n feature = []\n for i in tqdm(range(len(data))):\n for j in range(len(data[i])):\n feat = getGabor(data[i][j], filters)\n feature.append([feat, i])\n return feature\n\n## get features and evaluate\nfilters = build_filters()\ntextFeature = gabor_filter(data, filters)\nprint('[Texture feature]')\nevaluate(textFeature)\n\n## save features\nfor i in range(len(textFeature)):\n textFeature[i][0] = textFeature[i][0].tolist()\nwith open('texture.json', 'w') as f:\n json.dump(textFeature, f)\n\n\n## load features\nwith open('texture.json', 'r') as f:\n textFeature = json.load(f)\n\n## plot feature distribution\nprint('[Visualization]')\nfeature = np.array([ i[0] for i in textFeature])\nlabel = [ i[1] for i in textFeature]\nembedded = TSNE(n_components=2).fit_transform(feature)\n## init color map\ncmap = cm.rainbow(np.linspace(0.0, 1.0, max(label)+1))\nnp.random.shuffle(cmap)\nplt.scatter(embedded[:, 0], embedded[:, 1], marker='o', color=cmap[label[:]])\nplt.title('feature distribution')\nplt.show()\n\n\n# Local feature\n\ndef getSift(img):\n # create sift and compute features\n sift = 
cv2.xfeatures2d.SIFT_create(nfeatures=500)\n kp, features = sift.detectAndCompute(img, None)\n return features\n\ndef sift_with_kmeans_clustering(data):\n n_clusters = 256\n feature = []\n all_features = []\n for i in tqdm(range(len(data))):\n for j in range(len(data[i])):\n feat = getSift(data[i][j])\n feature.append([feat[n] for n in range(feat.shape[0])])\n for n in range(feat.shape[0]):\n all_features.append(feat[n])\n # apply kmeans clustering\n kmeans = KMeans(n_clusters=n_clusters).fit(all_features)\n return feature, kmeans\n\ndef retrieve_codebooks(feature, kmeans):\n cur = 0\n codebooks = []\n n_clusters = 256\n # retrieve codebook for each feature\n for i in tqdm(range(len(data))):\n for j in range(len(data[i])):\n tmp_feature = [0.0 for _ in range(n_clusters)]\n clusters = kmeans.predict(feature[cur])\n for n in range(clusters.shape[0]):\n tmp_feature[clusters[n]] += 1\n tmp_feature = [x / clusters.shape[0] for x in tmp_feature]\n codebooks.append([tmp_feature, i])\n cur += 1\n return codebooks\n\n# get features and evaluate\nlocalFeature, kmeans = sift_with_kmeans_clustering(data)\ncodebooks = retrieve_codebooks(localFeature, kmeans)\nprint('[Local feature]')\nevaluate(codebooks)\n\n# save features\nwith open('localFeature.json', 'w') as f:\n json.dump(codebooks, f)\n\n\n# load features\nwith open('localFeature.json', 'r') as f:\n localFeature = json.load(f)\n\n# plot feature distribution\nprint('[Visualization]')\nfeature = np.array([ i[0] for i in localFeature])\nlabel = [ i[1] for i in localFeature]\nembedded = TSNE(n_components=2).fit_transform(feature)\n# Init color map\ncmap = cm.rainbow(np.linspace(0.0, 1.0, max(label)+1))\nnp.random.shuffle(cmap)\nplt.scatter(embedded[:, 0], embedded[:, 1], marker='o', color=cmap[label[:]])\nplt.title('feature distribution')\nplt.show()\n","repo_name":"Andychen3558/CC2019","sub_path":"hw2/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":7276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71760414025","text":"import tkinter as tk\n\n# CRIANDO A JANELA\njanela = tk.Tk()\njanela.geometry('640x480')\njanela.title('Gerenciador de Frases')\n\n# CRIANDO FRAME\nframe = tk.Frame(janela)\nframe.pack(padx=100, pady=10, fill='x', expand=True)\n\n# CRIANDO O LABEL\nlabel = tk.Label(frame, text='Olá Mundo')\nlabel.pack(fill='x', expand=True)\n\n# TEXTO ALTERADO\nfrase = tk.Label(frame, text='')\nfrase.pack(fill='x', expand=True)\nfraseInput = tk.Entry(frame)\nfraseInput.pack(fill='x', expand=True)\n\n# FUNÇÃO PARA TROCA DE FRASE NO LABEL\ndef trocar():\n frase.config(text=fraseInput.get())\n\n# CRIANDO BOTAO\nbotao = tk.Button(frame, text='Alterar', command=trocar)\nbotao.pack(fill='x', pady=25, expand=True)\n\n\n\n# INICIANDO O SISTEMA\n\njanela.mainloop()","repo_name":"guerramg/treinosPython","sub_path":"2 - Modulos/moduloTkinterGUI.py","file_name":"moduloTkinterGUI.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30558211191","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nOther geometries\nCreated 08.12.17 by Abdulla Gaibullaev.\nSite: http://ag-one.ru\n\"\"\"\n\nimport numpy as np\nfrom point import Point\n\n\nclass Ray(object):\n def __init__(self, start, direction):\n self.start = start\n self.direction = direction\n self.direction.normalize()\n\n def get_closest_index(self, points):\n \"\"\" return closest point to ray (only forward points) 
\"\"\"\n info = [(i, (p-self.start).length()) for i, p in enumerate(points) if self.direction.dot(p-self.start) > 0]\n info = [x for x in info if x[1] > 1e-6]\n return min(info, key=lambda x: x[1])[0] if info else None\n\n def __repr__(self):\n return ''.format(self.start, self.direction)\n\n\nclass Object3D(object):\n def intersect(self, ray):\n \"\"\" must return None or tuple(point, normal) \"\"\"\n raise NotImplementedError()\n\n\nclass Rectangle(Object3D):\n def __init__(self, center, width, height, vec_up, normal):\n self.center = center\n self.w = width\n self.h = height\n self.vec_up = vec_up.normalized() / (self.h/2.)\n self.normal = normal.normalized()\n self.vec_right = self.vec_up.cross(self.normal).normalized() / (self.w/2.)\n\n def intersect(self, ray):\n denom = self.normal.dot(ray.direction)\n if abs(denom) > 1e-6:\n p0l0 = self.center - ray.start; \n t = p0l0.dot(self.normal) / denom;\n if t < 0:\n return None\n else:\n return None\n\n intersection_point = ray.start + ray.direction * t\n rel_point = intersection_point - self.center\n y = rel_point.dot(self.vec_up)\n x = rel_point.dot(self.vec_right)\n\n if abs(y) > 1.0 or abs(x) > 1.0:\n return None\n\n normal = self.normal.copy()\n if ray.direction.dot(self.normal) > 0:\n normal = -normal\n\n return intersection_point, normal\n\n def rotate(self, angle, rot_axis, center):\n old_vec = self.center - center\n new_vec = old_vec.rotate(angle, rot_axis)\n\n self.center = center + new_vec\n self.vec_up = self.vec_up.rotate(angle, rot_axis)\n self.normal = self.normal.rotate(angle, rot_axis)\n self.vec_right = self.vec_right.rotate(angle, rot_axis)\n\n\n","repo_name":"Alick09/Terminal-Render","sub_path":"terminal_render/geometry/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"73034269385","text":"# -*-coding:utf-8-*-\r\nfrom bs4 import BeautifulSoup\r\nfrom lxml import html\r\nimport requests\r\nimport re\r\nimport sys\r\nimport time\r\nimport math\r\nimport random\r\n\r\n\r\nclass weipu(object):\r\n \"\"\"docstring for weipu\"\"\"\r\n\r\n def __init__(self, header):\r\n self.header = self.formdict(file=header, sep=': +')\r\n self.header['Connection'] = 'close'\r\n self.headforget = {}\r\n self.headforget['Host'] = self.header['Host']\r\n self.headforget['User-Agent'] = self.header['User-Agent']\r\n self.headforget['Referer'] = self.header['Referer']\r\n self.headforget['Connection'] = 'close'\r\n\r\n def get_id(self, page, keyword):\r\n time.sleep(1 + random.randint(1, 3))\r\n tm = 0\r\n out = False\r\n while tm < 3:\r\n try:\r\n res = requests.get(\r\n url='http://qikan.cqvip.com/zk/search.aspx?from=zk_search&key=' +\r\n keyword + '&page=' + str(page) + '&size=50&ls=1#search-result-list',\r\n # url='http://qikan.cqvip.com/zk/search.aspx?from=zk_search&key=U%3D%E5%BE%AE%E7%94%9F%E7%89%A9&page=2&size=50&ls=1#search-result-list',\r\n headers=self.header, timeout=20)\r\n if res.status_code == 200:\r\n try:\r\n tree = html.fromstring(res.content)\r\n id_set = tree.xpath(\r\n '//input[@name=\"vcubeid\"]/@value')\r\n id_set = [id.strip() for id in id_set]\r\n totalpage = tree.xpath('//span[@class=\"total\"]/text()')[0]\r\n totalpage = int(re.sub(\",\", \"\", re.search(\"[\\d|,]+\", totalpage).group()))\r\n except IndexError:\r\n pass\r\n tm += 1\r\n if id_set:\r\n out = [totalpage, id_set]\r\n return(out)\r\n else:\r\n print('\\n Loading Error, tring another time')\r\n time.sleep(2)\r\n\r\n except 
Exception as e:\r\n print('Error occurred: ' + str(e) + 'Tring to fix is.')\r\n time.sleep(2)\r\n return(out)\r\n # print(res.text)\r\n\r\n def formdict(self, file, sep):\r\n d = {}\r\n for line in open(file, \"r\", encoding='utf-8'):\r\n line = re.sub('\\n$', '', line)\r\n if not re.search(\" $\", line):\r\n li = re.split(sep, line)\r\n d[li[0]] = li[1]\r\n return(d)\r\n\r\n def get_info(self, id):\r\n time.sleep(1 + random.randint(1, 4))\r\n while True:\r\n try:\r\n res = requests.get(url='http://qikan.cqvip.com/article/detail.aspx?id=%s&from=zk_search' %\r\n (str(id)), headers=self.headforget)\r\n tree = html.fromstring(res.content)\r\n title = tree.xpath('//h1/text()')[0].strip()\r\n if title:\r\n break\r\n else:\r\n print('\\n Loading Error, tring another time')\r\n time.sleep(1)\r\n except TimeoutError:\r\n print(\"TimeoutError, please wait for 2 seconds\")\r\n time.sleep(2)\r\n\r\n author = tree.xpath('//p[@class=\"author\"]/a/text()')\r\n author_s = tree.xpath('//p[@class=\"author\"]/span/a/text()')\r\n author = author + author_s\r\n\r\n if author:\r\n author = [at.strip() for at in author]\r\n organ = tree.xpath('//p[@class=\"organ\"]/a/text()')\r\n organ_s = tree.xpath('//p[@class=\"organ\"]/span/a/text()')\r\n organ = organ + organ_s\r\n if not organ:\r\n organ = \"\"\r\n\r\n try:\r\n info = tree.xpath('//p[@class=\"abstrack\"]/text()')[1].strip()\r\n info = re.sub('[\\n\\r]+', '; ', info)\r\n except IndexError:\r\n info = \"\"\r\n cinfo = info\r\n\r\n info = re.sub(\".\", \".\", info)\r\n info = re.split(\"通[讯|信]作者\", info.strip())\r\n\r\n def findname(tar):\r\n for n in author:\r\n if re.search(n, tar):\r\n return(n)\r\n\r\n f_author = author[0].strip()\r\n if findname(info[0]):\r\n f_author = findname(info[0])\r\n\r\n m_author = '无法判断'\r\n try:\r\n if findname(info[1]):\r\n m_author = findname(info[1])\r\n except IndexError:\r\n pass\r\n\r\n f_email = '无'\r\n if re.search('[a-zA-Z0-9_\\-\\+..]+@[a-zA-Z0-9_\\-\\+..]+', info[0]):\r\n f_email = re.search('[a-zA-Z0-9_\\-\\+..]+@[a-zA-Z0-9_\\-\\+..]+', info[0]).group()\r\n f_email = f_email.strip('\\.|.')\r\n\r\n m_email = '无'\r\n try:\r\n if re.search('[a-zA-Z0-9_\\-\\+..]+@[a-zA-Z0-9_\\-\\+..]+', info[1]):\r\n m_email = re.search('[a-zA-Z0-9_\\-\\+..]+@[a-zA-Z0-9_\\-\\+..]+', info[1]).group()\r\n m_email = m_email.strip('\\.|.')\r\n except IndexError:\r\n pass\r\n\r\n f_tel = '无'\r\n if re.search('Tel:([0-9\\-—\\+]+)', info[0]):\r\n f_tel = re.search('Tel:([0-9\\-—\\+]+)', info[0]).group(1)\r\n f_tel = f_tel.strip('\\.|.|\\-')\r\n\r\n m_tel = '无'\r\n try:\r\n if re.search('Tel:([0-9\\-—\\+]+)', info[1]):\r\n m_tel = re.search('Tel:([0-9\\-—\\+]+)', info[1]).group(1)\r\n m_tel = m_tel.strip('\\.|.|\\-')\r\n except IndexError:\r\n pass\r\n\r\n return([f_email, f_author, m_email, m_author, ';'.join(author), cinfo, ';'.join(organ), title, f_tel, m_tel])\r\n else:\r\n return(False)\r\n\r\n # print(res.content)\r\n\r\n def formkeyword(self, table):\r\n keyword = []\r\n with open(table, 'r', encoding='utf-8') as infile:\r\n for line in enumerate(infile):\r\n li = re.sub('\\?', '', line[1])\r\n li = re.split('\\t', li.strip('\\n'))\r\n li = [l.strip() for l in li]\r\n if line[0] == 0:\r\n for w in enumerate(li):\r\n if w[1] == '负责人':\r\n an = w[0]\r\n elif w[1] == '依托单位':\r\n on = w[0]\r\n elif w[1] == '邮箱':\r\n en = w[0]\r\n else:\r\n keyword.append(['\\t'.join(li[0:11]), li[an], 'A%3D' + li[an], 'A%3D' +\r\n li[an] + '[*]' + 'S%3D' + li[on], li[en]])\r\n return(keyword)\r\n\r\n def main(self, table, maxtry=5):\r\n outpath = table + 
'_search_for_email.xls'\r\n try:\r\n with open(outpath, 'r', encoding='utf-8') as prefile:\r\n prepmid = []\r\n for line in prefile:\r\n prepmid.append(re.search('\\t([^\\t\\n]+)$', line).group(1).strip())\r\n\r\n except FileNotFoundError:\r\n prepmid = []\r\n\r\n pre_table = self.formkeyword(table=table)\r\n with open(outpath, 'a', encoding='utf-8') as outff:\r\n if not prepmid:\r\n outff.write('跟进助理\\t备注\\t项目名\\t负责人\\t职称\\t依托单位\\t经费\\t起始时间\\t领域\\t电话\\t邮箱\\t维普邮箱\\t文章标题\\t详细信息\\t维普机构\\t电话\\t查询方式\\t搜索关键词\\n')\r\n print('Start to get author information......')\r\n d = 0\r\n for origin, name, short_key, long_key, pre_email in pre_table:\r\n d = d + 1\r\n perctg = 100 * d / len(pre_table)\r\n done = int(50 * d / len(pre_table))\r\n sys.stdout.write(\"\\r[%s%s] %.3f%%\" % ('█' * done, ' ' * (50 - done), perctg))\r\n sys.stdout.flush()\r\n if long_key not in prepmid and name:\r\n finded_email = '无法找到邮箱\\t\\t\\t\\t\\t'\r\n if pre_email:\r\n finded_email = pre_email + '\\t\\t\\t\\t\\t'\r\n else:\r\n page = 0\r\n totalpage = 1\r\n ifound = True\r\n while page < totalpage and ifound and page < maxtry:\r\n id_set = []\r\n page += 1\r\n print(\"\\n Searching in page%s according to name and organization\" % (page))\r\n # sys.stdout.flush()\r\n finded_id = self.get_id(page=page, keyword=long_key)\r\n if finded_id:\r\n id_set = finded_id[1]\r\n totalpage = finded_id[0]\r\n ifound = finded_id\r\n\r\n for eachid in id_set:\r\n finded_info = self.get_info(id=eachid)\r\n if finded_info:\r\n if not finded_info[0] == '无' and name == finded_info[1]:\r\n finded_email = finded_info[0] + '\\t' + finded_info[7] + '\\t' + finded_info[5] + \\\r\n '\\t' + finded_info[6] + '\\t' + finded_info[8] + '\\t' + '根据姓名和单位查找'\r\n break\r\n elif not finded_info[2] == '无' and name == finded_info[3]:\r\n finded_email = finded_info[2] + '\\t' + finded_info[7] + '\\t' + finded_info[5] + \\\r\n '\\t' + finded_info[6] + '\\t' + finded_info[9] + '\\t' + '根据姓名和单位查找'\r\n break\r\n if not finded_email == '无法找到邮箱\\t\\t\\t\\t\\t':\r\n break\r\n\r\n if finded_email == '无法找到邮箱\\t\\t\\t\\t\\t':\r\n page = 0\r\n totalpage = 1\r\n ifound = True\r\n while page < totalpage and ifound and page < maxtry:\r\n id_set = []\r\n page += 1\r\n print(\"\\n Searching in page%s according to name\" % (page))\r\n # sys.stdout.flush()\r\n finded_id = self.get_id(page=page, keyword=short_key)\r\n if finded_id:\r\n id_set = id_set + finded_id[1]\r\n totalpage = finded_id[0]\r\n ifound = finded_id\r\n\r\n for eachid in id_set:\r\n finded_info = self.get_info(id=eachid)\r\n if finded_info:\r\n if not finded_info[0] == '无' and name == finded_info[1]:\r\n finded_email = finded_info[0] + '\\t' + finded_info[7] + '\\t' + finded_info[5] + \\\r\n '\\t' + finded_info[6] + '\\t' + finded_info[8] + '\\t' + '只根据名字查找'\r\n break\r\n elif not finded_info[2] == '无' and name == finded_info[3]:\r\n finded_email = finded_info[2] + '\\t' + finded_info[7] + '\\t' + finded_info[5] + \\\r\n '\\t' + finded_info[6] + '\\t' + finded_info[9] + '\\t' + '只根据名字查找'\r\n break\r\n if not finded_email == '无法找到邮箱\\t\\t\\t\\t\\t':\r\n break\r\n outff.write('%s\\t%s\\t%s\\n' % (origin, finded_email, long_key))\r\n\r\n\r\nif __name__ == '__main__':\r\n page = weipu(header='header.txt')\r\n page.main(table=sys.argv[1])\r\n","repo_name":"bayegy/Clamb","sub_path":"find_author_by_name_and_organ.py","file_name":"find_author_by_name_and_organ.py","file_ext":"py","file_size_in_byte":12137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"35541390545","text":"#!/usr/bin/env python3\n\nimport os\nimport os.path\nimport sys\nimport pdb\nimport shutil\n\ndef relative_ln_s( from_, to_ ):\n \"\"\"\n\n This is just so dirty & boring: create a relative symlink, making the\n to_ path relative to from_. No errorchecks. Both arguments must be\n files, a destination directory doesn't work (I think). An existing\n file in to_ will be removed.\n\n \"\"\"\n prefix = os.path.commonprefix( [ to_, from_ ] )\n if prefix == '':\n prefix = '/'\n source = from_.split( prefix )[ 1 ]\n dest = to_.split( prefix )[ 1 ]\n level = len( dest.split( '/' ) ) - 1\n path = ( '../' * level ) + source\n return path\n\nUSAGE = 'Usage: make_rel_symlink [-p] '\n\njust_print = False;\nif sys.argv[1] == \"-p\":\n just_print = True;\n sys.argv = sys.argv[ 1:]\n\nif len( sys.argv ) != 3:\n print(USAGE)\n sys.exit( 1 )\n\nif os.path.isdir( sys.argv[2] ):\n print(\"Removing link target dir:\" + sys.argv[2])\n shutil.rmtree( sys.argv[2])\n\nlink_path = relative_ln_s( sys.argv[1], sys.argv[2] )\nif just_print:\n print(link_path)\nelse:\n os.chdir( os.path.dirname( sys.argv[2]))\n target = os.path.basename( sys.argv[2])\n if os.path.exists( target ):\n os.unlink( target)\n os.symlink( link_path, target)\n\n\n","repo_name":"andyvand/LIRC","sub_path":"tools/make_rel_symlink.py","file_name":"make_rel_symlink.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"22360788490","text":"# 팰린드롬수\nimport sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nwhile True:\n value = input().rstrip()\n if value == '0':\n break\n Q = deque(value)\n cmp_value = \"\"\n while Q:\n cmp_value += Q.pop()\n if value == cmp_value:\n print(\"yes\")\n else:\n print(\"no\")","repo_name":"zpqmdh/BOJ","sub_path":"data_structures/1259.py","file_name":"1259.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20040335397","text":"import os\nimport numpy as np\n# import pyttsx3\nimport re\nimport csv\nimport pandas as pd\nimport json\nfrom transformers import BertTokenizer\n\n# engine = pyttsx3.init()\n# engine.say(\"I will speak this text\")\n# engine.runAndWait()\n\n\ndef edit_text(text):\n text = re.sub('\\n', '', text).lower()\n return text\n\n\nedit_text_v = np.vectorize(edit_text)\n\n\nclass TrainingData:\n def __init__(self, batch_size):\n self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n self.added_vocab = ['@ax', '@ay', '@az', '@bx', '@by', '@bz', '@cx', '@cy', '@cz']\n self.tokenizer.add_tokens(self.added_vocab)\n\n self.batch_size = batch_size\n MARVEL_MOVIE_DIALOG_PATH = '/home/jskaggs93/Datasets/MarvelMovieDialogs/'\n CORNELL_MOVIE_DIALOG_PATH = '/home/jskaggs93/Datasets/CornellMovieDialog/'\n # TEXT_DATASET_FOR_NLP_PATH = '/home/jskaggs93/Datasets/TextDataSetforNLP/'\n GUTENBERG_DATASET_PATH = '/home/jskaggs93/Datasets/gutenberg_dataset_en/'\n\n # global variables\n self.word_buffer_len = 40\n\n # Cornell Movie Dataset\n cornel_movie_texts = open(CORNELL_MOVIE_DIALOG_PATH + 'movie_lines.txt').readlines()\n line_id, character_name = [], []\n for i in range(len(cornel_movie_texts)):\n line_id += [int(cornel_movie_texts[i].split('+++$+++')[0][1:])]\n character_name += [cornel_movie_texts[i].split('+++$+++')[3]]\n cornel_movie_texts[i] = cornel_movie_texts[i].split('+++$+++')[4]\n\n cornel_movie_texts = np.array(cornel_movie_texts)\n 
cornel_movie_texts = edit_text_v(cornel_movie_texts)\n self.cornel_line_id, self.cornel_character_id, self.cornel_text = zip(*sorted(zip(line_id, character_name, cornel_movie_texts)))\n self.cornell_idx = 0\n\n # Marvel Dataset\n marvel_movies = os.listdir(MARVEL_MOVIE_DIALOG_PATH)\n self.marvel_movie_texts = []\n for movie in marvel_movies:\n if movie != '.DS_Store':\n tmp = open(MARVEL_MOVIE_DIALOG_PATH + movie, encoding=\"utf8\")\n text = np.array(tmp.readlines())\n self.marvel_movie_texts += [edit_text_v(text)]\n self.marvel_current_movie_idx = 0\n self.marvel_current_movie_text_idx = 0\n\n # Gutenberg Dataset\n self.gutenberg_texts = open(GUTENBERG_DATASET_PATH + 'train.txt').readlines()\n self.gutenberg_idx = 0\n self.make_test_set()\n\n def make_test_set(self):\n self.test_z_i, self.test_z_ni, self.test_tar = [], [], []\n self.cornel_initial_id = 0\n for i in range(20): # 800 examples\n self.load_next_cornell()\n self.test_z_i += self.z_i\n self.test_z_ni += self.z_ni\n self.test_tar += self.tar\n self.cornel_initial_id = self.cornell_idx\n\n def get_test_set(self):\n Z_i = self.format_text(self.test_z_i)\n Z_ni = self.format_text(self.test_z_ni)\n Tar = self.format_text(self.test_tar)\n return Z_i, Z_ni, Tar\n\n def load_next_trn_file(self):\n rand = np.random.randint(0, 200)\n # return self.load_next_gutenberg()\n if rand < 5:\n return self.load_next_cornell()\n else:\n return self.load_next_gutenberg()\n\n def load_next_marvel(self):\n if self.marvel_current_movie_text_idx + self.batch_size >= len(self.marvel_movie_texts[self.marvel_current_movie_idx]):\n self.marvel_current_movie_idx += 1\n self.marvel_current_movie_text_idx = 0\n if self.marvel_current_movie_idx >= len(self.marvel_movie_texts):\n self.marvel_current_movie_idx = 0\n self.text = self.marvel_movie_texts[self.marvel_current_movie_idx][self.marvel_current_movie_text_idx:self.marvel_current_movie_text_idx + self.batch_size]\n self.marvel_current_movie_text_idx += self.batch_size\n return self.text\n\n def load_next_cornell(self):\n z_i, z_ni, tar, i = [], [], [], 0\n while i < self.batch_size:\n if self.cornell_idx + 1 + i >= len(self.cornel_text):\n self.cornell_idx = self.cornel_initial_id\n print('restarting cornell dataset ...')\n if self.cornel_line_id[self.cornell_idx + i] == self.cornel_line_id[self.cornell_idx + i + 1] - 1:\n if self.cornell_idx + i > 0 and self.cornel_line_id[self.cornell_idx + i - 1] == self.cornel_line_id[self.cornell_idx + i + 1] - 2:\n z_i += [self.cornel_text[self.cornell_idx + i - 1]]\n else:\n z_i += [' ']\n z_ni += [self.cornel_text[self.cornell_idx + i]]\n tar += [self.cornel_text[self.cornell_idx + i + 1]]\n i += 1\n else:\n self.cornell_idx += 1\n self.cornell_idx += self.batch_size\n\n self.z_i, self.z_ni, self.tar = z_i, z_ni, tar\n return self.z_i, self.z_ni, self.tar\n\n def load_next_gutenberg(self):\n z_i, z_ni, tar, i = [], [], [], 0\n while i < self.batch_size:\n if self.gutenberg_idx + 1 + i >= len(self.gutenberg_texts):\n self.gutenberg_idx = 0\n print('restarting gutenberg dataset ...')\n if self.gutenberg_texts[self.gutenberg_idx + i] != '' and self.gutenberg_texts[self.gutenberg_idx + i + 1] != '':\n if self.gutenberg_idx + i > 0 and self.gutenberg_texts[self.gutenberg_idx + i - 1] != '':\n z_i += [edit_text(self.gutenberg_texts[self.gutenberg_idx + i - 1])]\n else:\n z_i += [' ']\n z_ni += [edit_text(self.gutenberg_texts[self.gutenberg_idx + i])]\n tar += [edit_text(self.gutenberg_texts[self.gutenberg_idx + i + 1])]\n i += 1\n else:\n self.gutenberg_idx += 1\n 
self.gutenberg_idx += self.batch_size\n\n self.z_i, self.z_ni, self.tar = z_i, z_ni, tar\n return self.z_i, self.z_ni, self.tar\n\n def get_messages_and_actions(self):\n Z_ni = self.format_text(self.z_ni)\n Z_i = self.format_text(self.z_i)\n Tar = self.format_text(self.tar)\n return Z_i, Z_ni, Tar\n\n def format_text(self, texts):\n trn_datas = []\n for text in texts:\n trn_data = np.zeros((1, self.word_buffer_len))\n if text == \"No Messages Sent\" or pd.isna(text):\n text = ''\n tokens = self.tokenizer.encode(text)\n for j, token in enumerate(tokens[:self.word_buffer_len]):\n trn_data[0, j] = token\n trn_datas += [trn_data]\n return np.array(trn_datas)\n\n def convert_to_words(self, text):\n return self.tokenizer.decode(text)\n\n\nif __name__ == \"__main__\":\n td = TrainingData(20)\n td.load_next_trn_file()\n a, b, c = td.get_messages_and_actions()\n for i in range(20):\n print('Person A: ' + td.convert_to_words(a[i, 0, :]))\n print('Person B: ' + td.convert_to_words(b[i, 0, :]))\n print('Person A: ' + td.convert_to_words(c[i, 0, :]))\n print()\n\n # for s, t in zip(td.cornel_movie_speaker[:20], td.cornel_movie_texts[:20]):\n # print(s + ': ' + t)\n","repo_name":"jbskaggs/Llumi-Language-learning-using-models-of-intentionality","sub_path":"src/dataset_processing/training_data_movie.py","file_name":"training_data_movie.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35504112530","text":"\"\"\"\n * Sensor Eval\n * to_csv.py\n * @author Jialiang Shi\n \"\"\"\nimport copy\nfrom filedriver.cmpfile import CmpFile,DataTypeId\n\ndef InitCsv(head, fname):\n \"\"\"\n Initiate csv table head\n :param head: table head to use\n :param fname: the output name of the csv table\n :return: None\n \"\"\"\n l = len(head)\n with open(fname, 'w') as csvfile:\n csvfile.write((('%s,'*l)[:-1]+'\\n')% head)\n\ndef ExtractFrame(idx,cmpFile,fname):\n \"\"\"\n Extract useful features from a frame, and append to the csv table.\n :param idx: the frame idx\n :param cmpFile: cmpFile to use\n :param fname: the output name of the csv table\n :return: None\n \"\"\"\n frame = copy.deepcopy(cmpFile.ReadFrame(idx, DataTypeId.Object, 'begin'))\n timeStamp = frame.GetFrameHeader().TimeStamp.ToUnixStamp()\n frameIdx = frame.GetFrameHeader().FrameIdx\n objList = frame.GetDataBlock().Objects\n objNum = len(objList)\n # 需要与所需属性的header对应\n for i in range(objNum):\n obj = objList[i]\n obj_idx = i\n obj_id = obj.Id\n BdBox_0X = obj.BoundingBox[0].ToVector()[0]\n BdBox_0Y = obj.BoundingBox[0].ToVector()[1]\n BdBox_1X = obj.BoundingBox[1].ToVector()[0]\n BdBox_1Y = obj.BoundingBox[1].ToVector()[1]\n BdBox_2X = obj.BoundingBox[2].ToVector()[0]\n BdBox_2Y = obj.BoundingBox[2].ToVector()[1]\n BdBox_3X = obj.BoundingBox[3].ToVector()[0]\n BdBox_3Y = obj.BoundingBox[3].ToVector()[1]\n center_X = obj.Center.ToVector()[0]\n center_Y = obj.Center.ToVector()[1]\n className = obj.Classification.name\n velocity_X = obj.Velocity.ToVector()[0]\n velocity_Y = obj.Velocity.ToVector()[1]\n\n info = (frameIdx,\n timeStamp,\n obj_idx,\n obj_id,\n BdBox_0X,\n BdBox_0Y,\n BdBox_1X,\n BdBox_1Y,\n BdBox_2X,\n BdBox_2Y,\n BdBox_3X,\n BdBox_3Y,\n center_X,\n center_Y,\n className,\n velocity_X,\n velocity_Y,\n )\n info_str = tuple([str(i) for i in info])\n with open(fname, 'a') as csvfile:\n csvfile.write((('%s,' * len(info))[:-1] + '\\n') % 
info_str)","repo_name":"xiaomingLD/jiaojie","sub_path":"脚本/tocsv/to_csv.py","file_name":"to_csv.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74701664266","text":"from typing import Tuple\nimport os\n\nimport numpy as np\nimport taichi as ti\nimport matplotlib.pyplot as plt\n\n\n@ti.data_oriented\nclass BaseSim:\n def __init__(self, grid_resolution: Tuple[int], output_folder: os.PathLike):\n \"\"\"Base simulation class. Capable of creating grid of potential and its gradient\n Gradient computation is numerical\n\n Args:\n grid_resolution (Tuple[int]): Width and height resolution for potential and gradient\n output_folder (os.PathLike): Output folder\n \"\"\"\n self.output_folder = output_folder\n os.makedirs(output_folder, exist_ok=True)\n\n self.potential_gradient_grid = ti.Vector(2, dt=ti.f32)\n self.potential_grid = ti.Vector(1, dt=ti.f32)\n self.coords_grid = ti.Vector(2, dt=ti.f32)\n self.obstacle_grid = ti.Vector(1, dt=ti.i32)\n\n self.target_coordinate = ti.Vector(2, dt=ti.f32)\n self.velocity_direction = ti.Vector(2, dt=ti.f32)\n self.coordinate = ti.Vector(2, dt=ti.f32)\n self.velocity = ti.Vector(2, dt=ti.f32)\n self.acceleration = ti.Vector(2, dt=ti.f32)\n self.idx = ti.Vector(2, dt=ti.i32)\n\n self.hx = ti.var(dt=ti.f32)\n self.hy = ti.var(dt=ti.f32)\n\n self.grid_w, self.grid_h = grid_resolution\n\n ti.root.dense(ti.i, self.grid_w).dense(ti.j, self.grid_h).place(\n self.potential_gradient_grid, self.potential_grid, self.coords_grid, self.obstacle_grid\n )\n ti.root.place(self.idx)\n ti.root.place(self.velocity_direction)\n\n @ti.func\n def compute_potential_point(self):\n \"\"\"Function should compute potential value given the coorditane\n \"\"\"\n raise NotImplementedError\n\n @ti.kernel\n def compute_potential_grid(self):\n \"\"\"Kernel iterates though all the cells in the grid, stores the potential value\n \"\"\"\n for i, j in self.potential_grid:\n self.potential_grid[i, j][0] = self.compute_potential_point(self.coords_grid[i, j])\n\n @ti.kernel\n def compute_potential_grad_grid(self):\n \"\"\"Computes gradient grid from the potential grid, generated with compute_potential_grid function\n \"\"\"\n # https://numpy.org/doc/stable/reference/generated/numpy.gradient.html?highlight=gradient#numpy.gradient\n for i, j in self.potential_gradient_grid:\n if i == 0 or j == 0 or i == self.grid_w - 1 or j == self.grid_h - 1:\n continue\n\n self.potential_gradient_grid[i, j][0] = (\n self.potential_grid[i + 1, j][0] - self.potential_grid[i - 1, j][0]\n ) / (2 * self.hx)\n self.potential_gradient_grid[i, j][1] = (\n self.potential_grid[i, j + 1][0] - self.potential_grid[i, j - 1][0]\n ) / (2 * self.hy)\n\n @ti.kernel\n def find_cell(self, t: ti.i32):\n \"\"\"Stores the id of the cell the agent is in in the time id t\n\n Args:\n t (ti.i32): time id\n \"\"\"\n self.idx[None][0] = self.coordinate[t][0] // self.hx\n self.idx[None][1] = self.coordinate[t][1] // self.hy\n frac_x = self.idx[None][0] - self.coordinate[t][0] / self.hx\n frac_y = self.idx[None][1] - self.coordinate[t][1] / self.hy\n if frac_x >= 0.5:\n self.idx[None][0] += 1\n if frac_y >= 0.5:\n self.idx[None][1] += 1\n\n\n @ti.kernel\n def compute_obstacle_grid(self):\n \"\"\"Simple function that creates a rasterized obstacle grid\n \"\"\"\n for i, j in self.obstacle_grid:\n if (\n i == 0\n or j == 0\n or i == self.grid_w - 1\n or j == self.grid_h - 1\n or (j == self.grid_h // 2 and i == self.grid_w // 2)\n ):\n 
self.obstacle_grid[i, j][0] = 1\n\n def sim_step(\n self, t: ti.i32,\n ):\n \"\"\"Makes one step of the simulation\n\n Args:\n t (ti.i32): time id\n\n \"\"\"\n raise NotImplementedError\n\n def draw_potentials(self):\n \"\"\"Saves images of the potential and x and y derivatives\n \"\"\"\n pot_np = self.potential_grid.to_numpy().reshape(self.grid_w, self.grid_h)\n pot_np = pot_np + np.abs(pot_np.min())\n plt.imsave(os.path.join(self.output_folder, \"potential.jpg\"), pot_np / pot_np.max())\n pot_grad_np = self.potential_gradient_grid.to_numpy().reshape(self.grid_w, self.grid_h, 2)\n pot_grad_np = pot_grad_np + np.abs(pot_grad_np.min())\n plt.imsave(\n os.path.join(self.output_folder, \"potential_g0.jpg\"),\n pot_grad_np[:, :, 0] / (pot_grad_np.max() + 1e-3),\n )\n plt.imsave(\n os.path.join(self.output_folder, \"potential_g1.jpg\"),\n pot_grad_np[:, :, 1] / (pot_grad_np.max() + 1e-3),\n )\n\n plt.imsave(\n os.path.join(self.output_folder, \"obstacles.jpg\"),\n self.obstacle_grid.to_numpy().reshape(self.grid_w, self.grid_h),\n )\n\n def run_simulation(self):\n \"\"\"Function used to run the simulation\n \"\"\"\n raise NotImplementedError\n","repo_name":"belkakari/taichi_example","sub_path":"base_sim.py","file_name":"base_sim.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"72036750984","text":"from flask import Flask, request, redirect, jsonify, send_from_directory, render_template, session\nfrom flask import flash\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom flask_jwt_extended import (create_access_token, create_refresh_token,\n jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt)\nfrom flask_mail import Mail, Message\nfrom AuthAPI import app, db, celery, config, common\nfrom AuthAPI import jwt, blacklist, blacklist_token, mail, serializer\nfrom AuthAPI.controller import process\nimport traceback\nimport uuid\nimport random\nimport os\nimport requests\n# Initialize models\nfrom AuthAPI.model.user import User\nfrom AuthAPI.model.role import Role\nfrom AuthAPI.model.kyc import Kyc\n\nfrom AuthAPI.model.permission import Permission\nfrom AuthAPI import redis_client\n\n\n@app.route('/login', methods=['POST'])\ndef check_login():\n \"\"\" check user login \"\"\"\n try:\n username = request.json.get('username')\n password = request.json.get('password')\n\n user: User = User.query.filter_by(username=username).first()\n if (not user):\n return jsonify({'status': 0, 'message': 'Dont Existed User'})\n\n if(user.confirmed is False):\n return jsonify({'status': -1, 'message': 'User dont active '})\n \n # Check Valid hash password\n if user.verify_password(password): # please recheck verify password\n # check again, why unused variable Role ?\n permission = Role.get_permission_by_role(user.role)\n data = {\n 'status': 1,\n 'role': user.role,\n 'permission':permission,\n 'user_name': user.username,\n 'user_id': user.id\n }\n expires = timedelta(seconds=172800)\n access_token = create_access_token(data)\n print(access_token)\n return jsonify({\n 'status': 1,\n 'access_token': access_token,\n 'exp': expires.total_seconds()\n })\n else:\n return jsonify({'status': 0, 'message': 'Invalid Password'})\n except:\n traceback.print_exc()\n return jsonify({'status': 0, 'message': 'Invalid Username or Password'})\n\n\n@app.route('/logout', methods=['POST','GET'])\n@jwt_required\ndef logout():\n jti = get_raw_jwt()['jti']\n \n blacklist.add(jti)\n 
redis_client.set(jti, datetime.now().isoformat())\n return jsonify({\"msg\": \"Successfully logged out\"})\n\n # return jsonify({\"msg\": \"Successfully logged out\"}), 200\n\n\n@app.route('/update-user', methods=['POST'])\n@jwt_required\ndef update_user():\n \"\"\" update user \"\"\"\n try:\n user_id = request.json.get('user_id')\n email = request.json.get('email')\n phone = request.json.get('phone')\n user = User.query.filter_by(id=user_id).first()\n user.email = email\n user.phone = phone\n user.updateAt = datetime.utcnow()\n db.session.commit()\n return jsonify({'status': 1})\n except:\n traceback.print_exc()\n return jsonify({'status': 0})\n\n@app.route('/update-role-user', methods=['POST'])\n@jwt_required\ndef update_role_user():\n \"\"\" update user \"\"\"\n try:\n user_id = request.json.get('user_id')\n role = request.json.get('role')\n user = User.query.filter_by(id=user_id).first()\n user.role = role\n user.updateAt = datetime.utcnow()\n db.session.commit()\n return jsonify({'status': 1})\n except:\n traceback.print_exc()\n return jsonify({'status': 0})\n\n@app.route('/new-user', methods=['POST'])\ndef new_user():\n \"\"\" add new user \"\"\"\n try:\n username = request.json.get('username')\n password = request.json.get('password')\n email = request.json.get('email')\n domain_active = request.json.get('domain_active')\n # Use scalar is appreciated\n checked_username = User.query.filter_by(\n username=username).scalar() is not None\n checked_email = User.query.filter_by(email=email).scalar() is not None\n\n if (checked_email or checked_username):\n return jsonify({'status': 0, 'message': 'Email or Username Existed! Please type new username or email'})\n\n user = User(username,User.hash_password(password),email,\"guest\",format(random.randint(0,99999999), '08d'))\n user.confirmed = False\n\n user.confirmed_on = datetime.now()\n\n if domain_active is not None:\n process.send_async_email.delay(domain_active, email, username)\n db.session.add(user)\n db.session.commit()\n return jsonify({'status': 1, 'message': 'sign up success', 'email': email})\n except:\n traceback.print_exc()\n return jsonify({'status': 0, 'message': 'sign up error'})\n\n\n@app.route('/confirm-account', methods=['POST'])\ndef confirm_account():\n try:\n email_verify = request.json.get('email')\n token = request.json.get('token')\n print(email_verify)\n user = User.query.filter_by(email=email_verify).first()\n if(user.confirmed):\n return jsonify({\n 'status': 1,\n 'message': 'Account already confirmed. 
Please login'\n })\n if(common.checkBlacklist(blacklist_token, token)):\n return jsonify({\n 'status': -1,\n 'message': 'The confirmation link is invalid or has expired.'\n })\n try:\n email = process.confirm_token(token)\n except:\n blacklist_token.add(token)\n traceback.print_exc()\n return jsonify({\n 'status': -1,\n 'message': 'The confirmation link is invalid or has expired.'\n })\n if (not email):\n return jsonify({\n 'status': -1,\n 'message': 'The confirmation link is invalid or has expired.'\n })\n if (email_verify != email):\n return jsonify({\n 'status': -1,\n 'message': 'The confirmation link is invalid or has expired.'\n })\n user.role =\"trader\"\n user.confirmed = True\n user.confirmed_on = datetime.now()\n data_request = {\n 'user_id': user.id,\n 'currency': \"BTC\"\n }\n try:\n res_btc = requests.post(config.DEPOST_APP + '/account/open', json = data_request)\n data_request['currency']='ETH'\n res_eth = requests.post(config.DEPOST_APP + '/account/open', json = data_request)\n data_request['currency']='USDT'\n res_usdt = requests.post(config.DEPOST_APP + '/account/open', json = data_request)\n data_request['currency']='VND'\n res_vnd = requests.post(config.DEPOST_APP + '/account/open', json = data_request)\n except:\n traceback.print_exc()\n pass\n \n db.session.commit()\n return jsonify({\n 'status': 1,\n 'message': 'Account confirm Success. Please login'\n })\n except:\n traceback.print_exc()\n return jsonify({\n 'status': 0,\n 'message': 'Account confirm error. Please reconfirm account'\n })\n\n\n@app.route('/resend-confirm', methods=['POST'])\ndef resend_confirm():\n try:\n email = request.json.get('email')\n domain_active = request.json.get('domain_active')\n user = User.query.filter_by(email=email).first()\n process.send_async_email.apply_async(\n args=[domain_active, email, user.username], countdown=10)\n return jsonify({\n 'status': 'success',\n 'message': 'Resend Email success'\n })\n except:\n traceback.print_exc()\n return jsonify({\n 'status': 'error',\n 'message': 'Resend Email error. 
Please resend email to active your account'\n })\n\n@app.route('/role', methods=['POST'])\ndef new_role():\n \"\"\"create new role\"\"\"\n try:\n role_name = request.json.get('role_name')\n description = request.json.get('description')\n return jsonify(process.add_new_role(role_name, description))\n except:\n traceback.print_exc()\n return jsonify({'status': 0, 'message': 'Invalid Role'})\n\n\n@app.route('/update-role', methods=['POST'])\ndef update_role():\n \"\"\"update role\"\"\"\n try:\n role_id = request.json.get('role_id')\n role_name = request.json.get('role_name')\n role_description = request.json.get('description')\n \n role = Role.query.filter_by(id=role_id).first()\n role.name = role_name\n role.description = role_description\n db.session.commit()\n return jsonify({'status': 1,\n 'message': 'update role success'})\n except:\n traceback.print_exc()\n return jsonify({'status': 0, 'message': 'Invalid Role'})\n\n\n@app.route('/delete-role', methods=['POST'])\ndef delete_role():\n \"\"\"delete role\"\"\"\n try:\n role_id = request.json.get('role_id')\n role = Role.query.filter_by(id=role_id)\n if (not role):\n return jsonify({\n 'status': 0,\n 'message': 'Role dont exited'\n })\n db.session.delete(role)\n db.session.commit()\n return jsonify({\n 'status': 1,\n 'message': 'Delete role success'\n })\n except:\n traceback.print_exc()\n return jsonify({\n 'status': 0,\n 'message': 'Delete role error'\n })\n\n\n@app.route('/add-permission', methods=['POST'])\ndef add_premission():\n \"\"\"add permission to role\"\"\"\n role_id = request.json.get('role_id')\n permission = request.json.get('permission')\n new_permission = Permission(role_id, permission)\n db.session.add(new_permission)\n db.session.commit()\n return jsonify({\n 'status': 1,\n 'message': 'Add permission success'\n })\n \n\n\n@app.route('/delete-permission', methods=['POST'])\ndef remove_premission():\n \"\"\"remove permission to role\"\"\"\n permision_query = request.json.get('permission')\n permission = Permission.query.filter_by(permission = permision_query)\n if (not permission):\n return jsonify({\n 'status': 0,\n 'message': 'Role dont exited'\n })\n db.session.delete(permission)\n db.session.commit()\n return jsonify({\n 'status': 1,\n 'message': 'Delete permission success'\n })\n@app.route('/add-kyc', methods=['POST'])\ndef add_kyc():\n \"\"\"add kyc to user\"\"\"\n try:\n full_name = request.form.get('full_name')\n phone = request.form.get('phone')\n id_verify = request.form.get('id_verify')\n user_id = request.form.get('user_id')\n print(full_name)\n image_front = request.files['image_front_verify']\n image_selfie = request.files['image_selfie_verify']\n image_with = request.files['image_with_verify']\n image_alternative = request.files['image_alternative']\n print(image_front)\n dictory_user = os.path.join(config.FOLDER_KYC,user_id)\n if not os.path.exists(dictory_user):\n os.makedirs(dictory_user)\n\n user = User.query.filter_by(id = user_id).first()\n list_name = common.saveListImage([image_front,image_selfie,image_with,image_alternative],dictory_user,user.username)\n kyc = Kyc(full_name,phone,id_verify,list_name[0],list_name[1],list_name[2],list_name[3],user_id)\n db.session.add(kyc)\n db.session.commit()\n return jsonify({\n 'status': 1,\n 'message': 'Info user to verify success, Plese wait some minute to supporter verify info account '\n })\n except:\n traceback.print_exc()\n return jsonify({\n 'status': 0,\n 'message': 'Upload Info Kyc error'\n })\n\n@app.route('/update-kyc', methods=['POST'])\ndef 
update_kyc():\n \"\"\"update info kyc of user if verify fail\"\"\"\n try:\n id= request.form.get('id')\n print(id)\n kyc = Kyc.query.filter_by(id = id).first()\n user_id = kyc.user_id\n field_error = request.form.get('field_error')\n folder = os.path.join(config.FOLDER_KYC,str(user_id))\n user = User.query.filter_by(id = user_id).first()\n username = user.username\n \n status = request.form.get('status')\n kyc.status = status\n kyc.field_error = field_error\n print(request.files)\n if not os.path.exists(folder):\n os.makedirs(folder)\n if(\"full_name\" in request.form):\n full_name = request.form.get('full_name')\n kyc.full_name = full_name\n if(\"phone\" in request.form):\n phone = request.form.get('phone')\n kyc.phone = phone\n if(\"id_verify\" in request.form):\n id_verify = request.form.get('id_verify')\n kyc.id_verify = id_verify\n if(\"image_front_verify\" in request.files):\n image_front_verify = request.files[\"image_front_verify\"]\n image_front_verify.save(os.path.join(folder,username+\"_front.jpg\"))\n kyc.image_front_verify = os.path.join(folder,username+\"_front.jpg\").replace(\"/AuthAPI/static\",\"\")\n if(\"image_selfie_verify\" in request.files):\n image_selfie_verify = request.files[\"image_selfie_verify\"]\n image_selfie_verify.save(os.path.join(folder,username+\"_selfie.jpg\"))\n kyc.image_selfie_verify = os.path.join(folder,username+\"_selfie.jpg\").replace(\"/AuthAPI/static\",\"\")\n if(\"image_with_verify\" in request.files):\n image_with_verify = request.files[\"image_with_verify\"]\n image_with_verify.save(os.path.join(folder,username+\"_with_exchange.jpg\"))\n kyc.image_with_verify = os.path.join(folder,username+\"_with_exchange.jpg\").replace(\"/AuthAPI/static\",\"\")\n if(\"img_alternative\" in request.files):\n image_alternative = request.files[\"img_alternative\"]\n image_alternative.save(os.path.join(folder,username+\"_alternative.jpg\"))\n kyc.image_alternative = os.path.join(folder,username+\"_alternative.jpg\").replace(\"/AuthAPI/static\",\"\")\n\n db.session.add(kyc)\n db.session.commit()\n return jsonify({\n 'status': 1,\n 'message': 'Update info kyc of user success'\n })\n except:\n traceback.print_exc()\n return jsonify({\n 'status': 0,\n 'message': 'Error System'\n })\n \n@app.route('/checked-kyc', methods=['POST'])\ndef checked_kyc():\n \"\"\"remove permission to role\"\"\"\n user_id = request.json.get('user_id')\n kyc = Kyc.query.filter_by(user_id = user_id).first()\n print(kyc)\n if(kyc is None):\n return jsonify({\n 'status': -1\n })\n if(kyc.status != 1):\n return jsonify({\n 'status': kyc.status,\n 'message': 'kyc error',\n \"file_error\": kyc.field_error,\n \"id\": kyc.id\n }) \n else: \n return jsonify({\n 'status': kyc.status,\n 'message': 'kyc success',\n \"file_error\": \"\",\n \"id\": kyc.id\n })\n@app.route('/kyc-pending', methods=['POST'])\ndef kyc_pending():\n \"\"\"remove permission to role\"\"\"\n try:\n kyc = Kyc.query.filter_by(status = 0).all()\n print(kyc)\n if(kyc is None):\n return jsonify({\n 'status': -1\n })\n data = [common.getFullAttr(item) for item in kyc]\n print(data)\n return jsonify({\n 'status': 1,\n 'kyc' : data\n \n })\n except:\n traceback.print_exc()\n return jsonify({\n 'status': -1\n })\n \n ","repo_name":"Hiraki99/auth-api-exchange","sub_path":"AuthAPI/controller/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5304822146","text":"from fastapi import FastAPI\nfrom 
..src.classes.prediction import ImmothepPrediction\nimport numpy as np\n\npredictor = ImmothepPrediction()\n\napp = FastAPI()\n\n@app.get(\"/\")\ndef welcome_message():\n return {\"welcome_message\": \"World\"}\n\n@app.get(\"/api/estimate\")\ndef estimate(metre_carre: float, nb_pieces: int, terrain: float, code_postal: int):\n model_appart = predictor.trainLinearLeRetourAPIAppart(code_postal)\n model_maison = predictor.trainLinearLeRetourAPIMaison(code_postal)\n\n estimation_appart = predictor.predictionLinearAPI(nb_pieces, metre_carre, terrain, model_appart)\n estimation_appart = np.around(estimation_appart.item(), decimals=2)\n estimation_appart = str(estimation_appart)\n\n estimation_maison = predictor.predictionLinearAPI(nb_pieces, metre_carre, terrain, model_maison)\n estimation_maison = np.around(estimation_maison.item(), decimals=2)\n estimation_maison = str(estimation_maison)\n return {\"estimate_appartment\": estimation_appart + \" €\", \"estimate_house\": estimation_maison + \" €\"}\n","repo_name":"Twizzle1997/Immothep","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22166078223","text":"# from django.shortcuts import render\n# Create your views here.\n# views.py\n\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nimport os\n\n\n@csrf_exempt\ndef send_email(request):\n \"\"\"\n NOT USED!!!!\n \"\"\"\n if request.method == \"POST\":\n data = json.loads(request.body)\n key1 = \"SG._T2f78RJTH6bO_-RlauP5w.51nF\"\n name = data.get(\"name\")\n email = data.get(\"email\")\n message = data.get(\"message\")\n\n print(f\"DATA: {data}\")\n print(f\"Name: {name}\")\n print(f\"Email: {email}\")\n print(f\"Message: {message}\")\n\n message_text = f\"\"\"We've got a new business lead from the integral website! Find the details below:\\n\n Name: {name}\n Email: {email}\\n\n Message: {message}\n\n Thanks,\n Integral Bot\n \"\"\"\n\n key2 = \"\"\n message = Mail(\n from_email=\"integralnewbusiness@gmail.com\",\n to_emails=[\"amlvt225@icloud.com\"],\n subject=f\"New Business Opp - Message from {name} via Contact Form\",\n plain_text_content=message_text,\n )\n try:\n sg = SendGridAPIClient(f\"{key1}{key2}\")\n response = sg.send(message)\n print(response.status_code)\n print(response.body)\n print(response.headers)\n return JsonResponse({\"status\": \"success\"}, status=200)\n except Exception as e:\n print(e)\n return JsonResponse({\"status\": \"error\", \"error\": str(e)}, status=500)\n else:\n return JsonResponse(\n {\"status\": \"error\", \"error\": \"Only POST method is allowed\"}, status=400\n )\n\n\n# @csrf_exempt\n# def send_email(request):\n# if request.method == 'POST':\n\n\n# data = json.loads(request.body)\n# name = data.get('name')\n# email = data.get('email')\n# message = data.get('message')\n# print(f\"DATA: {data}\")\n# print(f\"Name: {name}\")\n# print(f\"Email: {email}\")\n# print(f\"Message: {message}\")\n\n# # import smtplib\n\n# # sender = 'integralinquiries@integral.io'\n# # receivers = ['integraldatainquiries@yahoo.com']\n\n# # # From: Integral New Business \n# # # To: Integral New Business \n# # # Subject: New Business Inquiry From Website\n# # to_send = f\"\"\"\n# # Hi Jeff,\n\n# # I hope you are doing well! Hello. 
You have receive a new business request from the website.\n\n# # Find the details below:\n# # Name: {name}\n# # Email: {email}\n# # Message: {message}\n# # \"\"\"\n# # try:\n# # smtpObj = smtplib.SMTP('localhost')\n# # smtpObj.sendmail(sender, receivers, to_send)\n# # print(\"Successfully sent email\")\n# # except SMTPException:\n# # print(\"Error: unable to send email\")\n\n\n# try:\n\n\n# send_mail(\n# f'Message from {name} via Contact Form',\n# message,\n# email,\n# ['integraldatainquiries@yahoo.com'], # Replace with your email\n# )\n# return JsonResponse({'status':'success'}, status=200)\n# except Exception as e:\n# print(e)\n# return JsonResponse({'status':'error', 'error': str(e)}, status=500)\n\n# else:\n# return JsonResponse({'status':'error', 'error':'Only POST method is allowed'}, status=400)\n","repo_name":"integral-data/integral_website","sub_path":"backend/main_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72385902666","text":"import logging\nimport logging.config\n\nfrom src.drives.kafka import Kafka\nfrom src.drives.enums import Topics\nfrom src.drives.kafka_template import AbsctractKafka\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nlogging.config.fileConfig('logging.conf')\nlog = logging.getLogger('mainCrawler')\n\nurl = 'https://portal.ifba.edu.br/conquista/noticias-2/noticias-campus-vitoria-da-conquista'\n\n\ndef get_dat_publish(article):\n data_modification = article.header.find(\n class_='documentByLine').getText().replace(\" \", \"\").replace('\\n', \"\")\n date = data_modification[data_modification.find('modificação') + 11:]\n return date\n\n\ndef crawler(page_url: str, verify: bool):\n page = requests.get(page_url, verify=verify)\n soup = BeautifulSoup(page.text, 'html.parser').find_all(\n 'article', class_='entry')\n posts = []\n for article in soup:\n summary = article.header.find(class_='summary')\n article = {\n 'title': summary.a.getText(),\n 'link': summary.a['href'],\n 'date': get_dat_publish(article)\n }\n posts.append(article)\n return posts\n\n\ndef get_posts(start=1) -> list:\n b_start = 0 if start == 1 else 30 * (start - 1)\n page_url = '{}?b_start:int={}'.format(url, b_start)\n posts = crawler(page_url, verify=True)\n return posts\n\n\ndef send_to_queue(queue: AbsctractKafka, posts):\n log.debug('Sending to queue...')\n queue.send_message(Topics.NEWS, str(posts))\n\n\ndef handler(queue: AbsctractKafka):\n\n posts = get_posts()\n for post in posts:\n send_to_queue(queue, post)\n\n\nif __name__ == '__main__':\n log.info(\"Starting...\")\n handler(Kafka())\n log.info(\"Finished...\")\n","repo_name":"flaviofilipe/ifba-kafka-news","sub_path":"main_crawler.py","file_name":"main_crawler.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20810689532","text":"import importlib\r\npython_salpa = importlib.import_module('.', 'python-salpa')\r\nimport numpy as np\r\n\r\ndata = np.array(data)\r\ntau = int(tau)\r\n\r\nkwargs = {}\r\n\r\nrail1 = np.array(rail1)\r\nrail2 = np.array(rail2)\r\nthresh = np.array(thresh)\r\nt_blankdepeg = np.array(t_blankdepeg)\r\nt_ahead = np.array(t_ahead)\r\nt_chi2 = np.array(t_chi2)\r\nt_forcepeg = np.array(t_forcepeg)\r\n\r\nif np.any(rail1):\r\n kwargs['rail1'] = rail1\r\n\r\nif np.any(rail2):\r\n kwargs['rail2'] = rail2\r\n\r\nif np.any(thresh):\r\n kwargs['thresh'] = 
thresh\r\n\r\nif np.any(t_blankdepeg):\r\n kwargs['t_blankdepeg'] = int(t_blankdepeg)\r\n\r\nif np.any(t_ahead):\r\n kwargs['t_ahead'] = int(t_ahead)\r\n\r\nif np.any(t_chi2):\r\n kwargs['t_chi2'] = int(t_chi2)\r\n\r\nif np.any(t_forcepeg):\r\n kwargs['t_forcepeg'] = int(t_forcepeg)\r\n\r\ndata = python_salpa.salpa(data, tau, **kwargs)","repo_name":"FrancescoNegri/matlab-salpa","sub_path":"wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13972916662","text":"#%matplotlib inline\r\nimport matplotlib.pyplot as plt\r\nimport math as mt\r\nimport numpy as np\r\nimport os\r\nimport pandas as pd\r\nimport seaborn as sn\r\n\r\ndata= pd.read_excel('breastcancer_dataset_standard_format.xlsx')\r\n\r\ndata['x6'].replace(-1,np.nan,inplace=True)\r\nsn.heatmap(data.isnull(),cbar=False)\r\n#heatmap to see null values in the dataset\r\nplt.show()\r\n\r\ndata['x6'].replace(np.nan,0,inplace=True)\r\nmean_col6=data['x6'].mean()\r\nfloorcal=mt.floor(mean_col6)\r\n#taking the floor value\r\ndata['x6'].replace(0,floorcal,inplace=True)\r\nprint(data['x6'].value_counts())\r\n#0 and 1 are catagories\r\nprint(data['y'].value_counts())\r\n\r\ndata.hist(bins=50,figsize =(15,15))\r\nplt.show()\r\ndata.head(5)\r\n\r\ndata.drop(columns='sample number',inplace=True)\r\n\r\nX=data[['x1','x2','x3','x4','x5','x6','x7','x8','x9']]\r\nY=data['y']\r\n\r\n\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=0)\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nx_train = sc.fit_transform(x_train)\r\nx_test = sc.transform(x_test)\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nlogisticRegr = LogisticRegression()\r\n\r\nlogisticRegr.fit(x_train, y_train)\r\n\r\nY_pred = logisticRegr.predict(x_test)\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_test, Y_pred)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(accuracy_score(y_test , Y_pred))\r\nprint(Y_pred)","repo_name":"abhishek-code8/Python","sub_path":"Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29561810976","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic.edit import CreateView\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.views.generic import View, TemplateView\nfrom django.views import generic\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.contrib import messages\nfrom search_views.search import SearchListView\nfrom .models import Product\nfrom .forms import UserForm\nfrom base.forms import UserForm\nfrom rest_framework.decorators import api_view\nfrom categories.models import Category\n\nclass Index(generic.ListView):\n template_name = \"base/index.html\"\n context_object_name = \"product\"\n\n def get_queryset(self):\n return Product.objects.all()\n\n\ndef detail(request,slug):\n products = ProductModel.objects.filter(active=True)\n categories = CategoryModel.objects.filer(active=True)\n if request.method 
==\"POST\":\n form =ReviewForm(request.POST)\n if form.is_valid():\n review = form.save(commit=False)\n review.product = product\n review.user = request.user\n review.save()\n messages.success(request, \"Review saved\")\n else:\n messages.error(request, \"invalid form\")\n else:\n form =ReviewForm()\n\n context = {\"product\" : product, \"products\" : products, \"catergories\" : categories, \"title\":cat.name + \" - Categories\", \"form\" : form}\n\n return render(request, \"base/detail.html\", context)\n\nclass Contact_Us(TemplateView):\n template_name = \"base/contact-us.html\"\n\n@login_required\ndef special(request):\n return HttpResponse(\"You are logged in !\")\n\n@login_required\ndef user_logout(request):\n logout(request)\n return redirect('base:index')\n\n@login_required\ndef profile(request):\n return render(request, 'base/profile.html')\n \ndef register(request):\n registered = False\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n if user_form.is_valid():\n user = user_form.save()\n user.set_password(user.password)\n user.save()\n profile = user_form.save(commit=False)\n profile.user = user\n if 'profile_pic' in request.FILES:\n print('found it')\n profile.profile_pic = request.FILES['profile_pic']\n profile.save()\n registered = True\n else:\n print(user_form.errors)\n else:\n user_form = UserForm()\n return render(request,'base/index.html',\n {'user_form':user_form,\n 'registered':registered})\n \ndef user_login(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n login(request, user)\n return HttpResponseRedirect(reverse('base:index'))\n else:\n return HttpResponse(\"Your account was inactive.\")\n else:\n print(\"Someone tried to login and failed.\")\n print(\"They used username: {} and password: {}\".format(username,password))\n return HttpResponse(\"Invalid login details given\")\n else:\n return render(request, 'base/index.html', {})\n\ndef get_category(request, slug):\n selected_category= get_object_or_404(Category, slug=slug)\n return render (request, 'base/shop.html',\n {'category':selected_category},\n {'business':Business.objects.all()})\n\n","repo_name":"Muneer24/Muneer24.github.io","sub_path":"base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35342107100","text":"import os,json,glob\nfrom shutil import copyfile\nimport tqdm\n#copyfile(src, dst)\nallannfiles=glob.glob('/data01/smoking/A_ano/a/jw/output/*.json')\ndef convert_xminymin_xcenterycenter(h, w, xmin, ymin, xmax, ymax):\n # < x_center > < y_center > < width > < height > - float values relative to width and height of image, it can be equal from (0.0 to 1.0]\n dw = 1. / (float(w))\n dh = 1. 
/ (float(h))\n x = (xmin + xmax) / 2.0\n\n y = (ymin + ymax) / 2.0\n w = xmax - xmin\n h = ymax - ymin\n x = round(x * dw, 6)\n w = round(w * dw, 6)\n y = round(y * dh, 6)\n h = round(h * dh, 6)\n # return x, y, w, h\n return f'{x} {y} {w} {h}'\nannsdir='/data01/yuanpu/smoke/data/anns'\npicturesdir='/data01/smoking/A_ano/a/jw'\nfor idx,i in tqdm.tqdm(enumerate(allannfiles)):\n obj=json.loads(''.join(list(map(str.strip,open(i,'r').readlines()))))\n annfilename=os.path.splitext(obj['asset']['name'])[0]+'.txt'\n h,w=obj['asset']['size']['height'],obj['asset']['size']['width']\n with open(os.path.join(annsdir,annfilename),'w') as fid:\n for xx in obj['regions']:\n a,b,c,d=map(float,xx['boundingBox'].values())\n obbox=convert_xminymin_xcenterycenter(h,w,c,d,c+b,d+a)\n fid.write(f\"0 {obbox}\\n\")\n copyfile(os.path.join(picturesdir,obj['asset']['name']),os.path.join(annsdir,obj['asset']['name']),)\n# if idx>10:\n# break\n","repo_name":"ald2004/cigrettes-smoke-detect","sub_path":"vott2yolo.py","file_name":"vott2yolo.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3273958059","text":"\"\"\"\nMit diesem Skript können mithilfe von Regex-Patterns automatisch Alters- und Geschlechtsangaben\naus Reddit-Posts ausgelesen und annotiert werden.\nEs wird ein pandas Dataframe zurückgegeben, welches in anderen Skripts verwendet, oder\nals csv gespeichert werden kann.\n\"\"\"\nimport json\nimport re\nimport pandas as pd\nimport os\nfrom tqdm import tqdm\nfrom models.zdl_vector_model import AREAL_DICT\n\n\ndef get_age_from_str(input_str: str) -> int:\n \"\"\" finds double-digit number within string \"\"\"\n search = re.search(r\"[1-9][0-9]\", input_str)\n age = search.group(0)\n\n return int(age)\n\ndef get_gender_from_str(input_str: str) -> str:\n \"\"\" finds gender tag in string \"\"\"\n search = re.search(r\"(m|M|f|F)\", input_str)\n if search:\n if search.group(0).upper() == \"M\":\n return \"male\"\n else:\n return \"female\"\n else:\n return \"N/A\"\n \ndef __clean_text(content: str, age) -> str:\n\n for put in [f\"M{age}\", f\"{age}M\", f\"m{age}\", f\"{age}m\", f\"f{age}\", f\"{age}f\", f\"F{age}\", f\"{age}F\", f\"{age}\"]:\n content = content.replace(put, \"[REDACTED]\")\n\n return content\n\ndef find_matches(data: dict, locale: str) -> list[dict]:\n\n \"\"\" finds gender or age tags within a reddit post and returns them \n in an annotated dictionary \"\"\"\n\n dict_of_texts_with_matches = {}\n\n for n, comment in enumerate(data[\"data\"]):\n\n selftext: str = comment[\"selftext\"]\n\n if selftext == \"[removed]\":\n continue\n\n m1 = re.finditer(r\"(mir|ich) (\\(|\\[)(M|m|F|f)[1-9][0-9](M|m|F|f)?(\\)|\\])\", selftext.lower())\n m2 = re.finditer(r\"(bin) [1-9][0-9](M|m|F|f)?\", selftext.lower())\n m3 = re.finditer(r\"(\\(|\\[)[1-9][0-9], (m|w|f)(\\)|\\])\", selftext.lower())\n\n result = [x.group() for x in m1] + [x.group() for x in m2] + [x.group() for x in m3] \n result_as_str = \"\".join(result)\n if result:\n dict_of_texts_with_matches[f\"item_{n}\"] = {\"match\": result_as_str, \"content\": selftext}\n\n for item in dict_of_texts_with_matches.keys():\n row = dict_of_texts_with_matches[item]\n age = get_age_from_str(row[\"match\"])\n gender = get_gender_from_str(row[\"match\"])\n dict_of_texts_with_matches[item][\"age\"] = age\n dict_of_texts_with_matches[item][\"sex\"] = gender\n dict_of_texts_with_matches[item][\"regiolect\"] = locale\n\n content = 
dict_of_texts_with_matches[item][\"content\"]\n content = __clean_text(content, age)\n\n dict_of_texts_with_matches[item][\"content\"] = content\n\n return dict_of_texts_with_matches\n\ndef annotate_to_dataframe(path: str, locale: str) -> pd.DataFrame:\n with open(path, \"r\") as f:\n data = json.load(f)\n\n matches = find_matches(data, locale)\n\n text_data = pd.DataFrame.from_dict(matches).transpose()\n text_data.reset_index(drop=True, inplace=True)\n\n return text_data\n\nif __name__ == \"__main__\":\n\n datasets = []\n path = \"test\"\n for subdir in [\"dating\"]: #, \"education\", \"profession\", \"locales\"]:\n print(\"annotating data from r/{}.\".format(subdir))\n directory_in_str = f\"{path}/reddit/{subdir}\"\n directory = os.fsencode(directory_in_str)\n for file in tqdm(os.listdir(directory)):\n filename = os.fsdecode(file)\n if filename.endswith(\".json\"): \n\n if subdir == \"locales\":\n r = str(filename.split(\".\")[0])\n for key in AREAL_DICT.keys():\n if r in AREAL_DICT[key]:\n locale = key\n else:\n locale = \"\"\n text_data = annotate_to_dataframe(f\"{directory_in_str}/{filename}\", locale)\n datasets.append(text_data)\n\n\n data: pd.DataFrame = pd.concat(datasets)\n print(data.head())\n print(len(data.index))\n print(list(set(data.sex.tolist())))\n\n data.to_parquet('test/reddit/annotated_posts_2.parquet')","repo_name":"kobrue02/BERTective","sub_path":"annotate_csv.py","file_name":"annotate_csv.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33390331878","text":"import struct\nimport socket\nfrom abc import ABCMeta, abstractstaticmethod\nfrom .PacketIGMPMSourceAddress import PacketIGMPMSourceAddress\nfrom .PacketGroupRecord import PacketGroupRecord\n\n\nclass PacketIGMPv3HeaderReport:\n \"\"\"\n 0 1 2 3\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n | Reserved | Number of Group Records (M) |\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n | |\n . .\n . Group Record [1] .\n . .\n | |\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n | . |\n . . .\n | . |\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n | |\n . .\n . Group Record [M] .\n . .\n | |\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\n \"\"\"\n\n IGMP_TYPE = 0x22\n MAX_TIME = 0 #RESERVED SPACE\n\n IGMP_VERSION = 3\n\n IGMP_HDR_R = \"! 
H H\"\n IGMP_HDR_R_LEN = struct.calcsize(IGMP_HDR_R)\n\n #The Reserved fields are set to zero on transmission, \n # and ignored on reception.\n IGMP_MAX_TIME = 0\n\n\n def __init__(self, reserved_hexa):\n self.reserved = reserved_hexa\n self.group_records = []\n \n\n def getReserved(self):\n return self.reserved\n\n\n def addGroupRecord(self, group_rec: PacketGroupRecord):\n isAlready = False\n for i in self.group_records:\n if i == group_rec:\n isAlready = True\n break\n if isAlready == False:\n self.group_records.append(group_rec)\n \n\n def bytes(self) -> bytes:\n \"\"\"\n Obtain packet in byte format\n \"\"\"\n msg = struct.pack(PacketIGMPv3HeaderReport.IGMP_HDR_R, self.reserved, len(self.group_records))\n \n for group in self.group_records:\n msg += group.bytes()\n \n return msg\n\n\n @staticmethod\n def parse_bytes(data: bytes):\n \"\"\"\n From bytes parse and obtain the IGMP Header object and all its payload\n \"\"\"\n #Filter the data to get only the IGMP Report header\n header = data[0:PacketIGMPv3HeaderReport.IGMP_HDR_R_LEN]\n (reserved, number_groups) = struct.unpack(PacketIGMPv3HeaderReport.IGMP_HDR_R, header)\n packet = PacketIGMPv3HeaderReport(reserved)\n\n header = data[PacketIGMPv3HeaderReport.IGMP_HDR_R_LEN:]\n for i in range(0, number_groups):\n group = PacketGroupRecord.parse_bytes(header)\n packet.addGroupRecord(group)\n number_sources = group.getNumberSources()\n sources_len = number_sources * PacketIGMPMSourceAddress.SOURCE_ADDRESS_LEN\n next_byte_pos = PacketGroupRecord.GROUP_RECORD_LEN + sources_len\n # Strategy: send header to PacketGroupRecord\n # then ---> in here check number of sources from group \n # object and use its length to calculate next\n # position for the header pointer\n header = header[next_byte_pos:]\n return packet\n\n\n\n\n ","repo_name":"CatarinaGrilo/HPIM-SSM","sub_path":"hpim_ssm/Packet/PacketIGMPv3HeaderReport.py","file_name":"PacketIGMPv3HeaderReport.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28902276691","text":"import cv2\r\nimport numpy as np\r\n# from skimage.metrics import structural_similarity, peak_signal_noise_ratio\r\nimport os\r\nimport torch\r\nimport torchvision.transforms.functional as TF\r\nfrom pytorch_msssim import ssim\r\nfrom uqim_utils import getUIQM\r\n\r\n\r\n\r\ndef torchPSNR(tar_img, prd_img):\r\n imdff = torch.clamp(prd_img, 0, 1) - torch.clamp(tar_img, 0, 1)\r\n rmse = (imdff**2).mean().sqrt()\r\n ps = 20*torch.log10(1/rmse)\r\n return ps\r\n\r\ndef torchSSIM(tar_img, prd_img):\r\n return ssim(tar_img, prd_img, data_range=1.0, size_average=True)\r\n\r\n\r\n\r\ndef ComputePSNR_SSIM(img_dir,gt_path):\r\n error_list_ssim, error_list_psnr , error_list_uiqm= [],[],[]\r\n for dir_path in img_dir:\r\n enhanced_name = dir_path.split('\\\\')[-1]\r\n gt_name = enhanced_name\r\n enhanced = cv2.imread(dir_path)\r\n gt = cv2.imread(os.path.join(gt_path, gt_name))\r\n uiqm_data = getUIQM(enhanced)\r\n gt = TF.to_tensor(gt)\r\n enhanced = TF.to_tensor(enhanced)\r\n error_psnr = torchPSNR(gt, enhanced)\r\n gt = gt.unsqueeze(0)\r\n enhanced = enhanced.unsqueeze(0)\r\n error_ssim = torchSSIM(gt,enhanced)\r\n print(enhanced_name, uiqm_data, error_psnr, error_ssim)\r\n error_list_psnr.append(error_psnr)\r\n error_list_ssim.append(error_ssim)\r\n error_list_uiqm.append(uiqm_data)\r\n return np.array(error_list_ssim), np.array(error_list_psnr), np.array(error_list_uiqm)\r\n\r\nif __name__=='__main__':\r\n enhanced_path = 
r'data/output'\r\n gt_path=r'data/gt'\r\n img_name = os.listdir(enhanced_path)\r\n img_dir = [ os.path.join(enhanced_path,name) for name in img_name]\r\n ssims,psnrs,uiqms = ComputePSNR_SSIM(img_dir,gt_path)\r\n print (\"SSIM >> Mean: {:.4f} std: {:.4f}\".format(np.mean(ssims), np.std(ssims)))\r\n print (\"PSNR >> Mean: {:.4f} std: {:.4f}\".format(np.mean(psnrs), np.std(psnrs)))\r\n print (\"UIQM >> Mean: {:.4f} std: {:.4f}\".format(np.mean(uiqms), np.std(uiqms)))\r\n","repo_name":"zhoujingchun03/HAAM-GAN","sub_path":"PSNR_SSIM_UIQM.py","file_name":"PSNR_SSIM_UIQM.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21963107140","text":"import datetime\nimport json\nimport discord\nfrom discord.ext import commands\nfrom pytimeparse import parse as parse_time\nfrom bot_tools import setup, get_member, error_embed, success_embed, log, mute_member, check_mutes\n\nwith open(\"data/credentials.json\", \"r\") as creds:\n credentials = json.load(creds)\n\nbot = commands.Bot(\n command_prefix=credentials[\"prefix\"],\n description=\"SpeedRunners ModBot\"\n)\nbot.remove_command(\"help\")\n\nsr = credentials[\"server_main\"]\nsr_admin = credentials[\"admin_main\"]\n\nsrm = credentials[\"server_management\"]\nsrm_admin = credentials[\"admin_management\"]\n\nedit_log = credentials[\"edit_log\"]\ndelete_log = credentials[\"delete_log\"]\nmute_log = credentials[\"mute_log\"]\nkick_log = credentials[\"kick_log\"]\nban_log = credentials[\"ban_log\"]\n\nblacklist = open(\"data/blacklist.txt\", \"r\").read().split(\"\\n\")\n\n\ndef is_admin() -> bool:\n # @is_admin()\n async def predicate(ctx: commands.Context) -> bool:\n user_roles = [role.id for role in ctx.author.roles]\n return (sr_admin in user_roles or\n srm_admin in user_roles)\n\n return commands.check(predicate)\n\n\n@bot.event\nasync def on_message(message: discord.Message) -> None:\n if message.content in blacklist:\n await log(\"profanity\", {\n \"ID\": message.id,\n \"User\": str(message.author),\n \"Channel\": message.channel.name,\n \"Message\": message.clean_content,\n \"Timestamp\": (\n message.edited_at.now().isoformat()\n if message.edited_at is not None else\n message.created_at.now().isoformat()\n )\n })\n await message.delete()\n await mute_member(message.author, 3600)\n # up to you for how long, I put 1 hour\n\n\n@bot.listen()\nasync def on_ready() -> None:\n print(f\"Logged in as\\n{bot.user.name}\\n{bot.user.id}\\n{20 * '-'}\")\n setup(credentials, bot, mute_log)\n bot.loop.create_task(check_mutes())\n\n\n@bot.listen()\nasync def on_message_edit(before: discord.Message, after: discord.Message) -> None:\n if after.author.bot or after.guild == bot.get_guild(sr) or before.content == after.content:\n return\n\n desc = await log(\"edited_messages\", {\n \"ID\": after.id,\n \"User\": str(after.author),\n \"Channel\": after.channel.name,\n \"Before\": before.clean_content,\n \"After\": after.clean_content,\n \"Jump To\": f\"[Link]({after.jump_url})\",\n \"Timestamp\": (\n after.edited_at.now().isoformat()\n if after.edited_at is not None else\n after.created_at.now().isoformat()\n )\n })\n\n desc = \"\\n\".join([line for line in desc.split(\"\\n\") if not line.startswith(\"**User**\")])\n\n embed = discord.Embed(description=desc, color=0xFFA500)\n embed.set_author(name=str(after.author),\n url=after.author.avatar_url,\n icon_url=after.author.avatar_url\n )\n\n channel = bot.get_channel(edit_log)\n await 
channel.send(embed=embed)\n\n\n@bot.listen()\nasync def on_message_delete(message: discord.Message) -> None:\n if message.author.bot or message.guild != bot.get_guild(sr):\n return\n\n desc = await log(\"deleted_messages\", {\n \"ID\": message.id,\n \"User\": str(message.author),\n \"Channel\": message.channel.name,\n \"Message\": message.clean_content,\n \"Timestamp\": (\n message.edited_at.now().isoformat()\n if message.edited_at is not None else\n message.created_at.now().isoformat()\n )\n })\n\n desc = \"\\n\".join([line for line in desc.split(\"\\n\") if not line.startswith(\"**User**\")])\n\n embed = discord.Embed(description=desc, color=0xFF0000)\n embed.set_author(\n name=str(message.author),\n url=message.author.avatar_url,\n icon_url=message.author.avatar_url\n )\n\n channel = bot.get_channel(delete_log)\n await channel.send(embed=embed)\n\n\n@bot.command()\nasync def help(ctx: commands.Context) -> None:\n kick = f\"\\t{bot.command_prefix}**kick** \"\n ban = f\"\\t{bot.command_prefix}**ban** \"\n help = f\"\\t{bot.command_prefix}**help**\"\n online = f\"\\t{bot.command_prefix}**online**\"\n uptime = f\"Bot uptime: {str(datetime.datetime.now() - start_time)[:-7]}\"\n\n embed = discord.Embed(color=0x506600)\n embed.add_field(\n name=\"Commands:\",\n value=\"\\n\".join([kick, ban, online, help, \"\", uptime])\n )\n\n await ctx.send(embed=embed)\n\n\n@bot.command()\nasync def online(ctx: commands.Context) -> None:\n guild = bot.get_guild(sr)\n member_count = guild.member_count\n status_count = [0, 0, 0, 0, 0] # online, offline, idle, dnd, invisible\n status_list = list(discord.Status)\n for member in guild.members:\n status_count[status_list.index(member.status)] += 1\n\n stats = discord.Embed(color=0x506600)\n stats.add_field(name=f\"Total members: {member_count}\", value=\"\\n\".join([\n f\"<:online:572884944813031434>{status_count[0]}\",\n f\"<:idle:572884943898673174>{status_count[2]}\",\n f\"<:do_not_disturb:572884944016113666>{status_count[3]}\",\n f\"<:offline:572884944343269378>{status_count[1] + status_count[4]}\"\n ]))\n await ctx.send(embed=stats)\n\n\n@bot.command()\nasync def ping(ctx: commands.Context) -> None:\n await ctx.send(f\"Pong! 
({round(bot.latency, 3) * 1e3}ms)\")\n\n\n@bot.command()\n@is_admin()\nasync def mute(ctx: commands.Context, user: str, *args: str) -> None:\n member = await get_member(bot.get_guild(sr), user)\n if member is None:\n await ctx.send(embed=await error_embed(f\"Unknown user '{user}'\"))\n\n n_seconds = parse_time(\"\".join(*args))\n if n_seconds is None:\n await ctx.send(embed=await error_embed(f\"Unknown time period '{' '.join(*args)}'\"))\n\n await mute_member(member, n_seconds)\n await ctx.send(embed=await success_embed(f\"Muted '{user}' for {str(datetime.timedelta(seconds=n_seconds))}\"))\n\n desc = await log(\"mutes\", {\n \"Muted\": member.name,\n \"Muted by\": ctx.message.author.name,\n \"Time period\": str(datetime.timedelta(seconds=n_seconds)),\n \"Timestamp\": ctx.message.created_at.now().isoformat()\n })\n\n embed = discord.Embed(title=\"Muted member\", description=desc, color=0xFFA500)\n channel = bot.get_channel(mute_log)\n await channel.send(embed=embed)\n\n\n@bot.command()\n@is_admin()\nasync def kick(ctx: commands.Context, user: str, *args: str) -> None:\n member = await get_member(bot.get_guild(sr), user)\n if member is None:\n await ctx.send(embed=await error_embed(f\"Unknown user '{user}'\"))\n\n try:\n await bot.get_guild(sr).kick(member, reason=\" \".join(args))\n await ctx.send(embed=await success_embed(f\"Kicked '{user}'\"))\n except discord.Forbidden:\n await ctx.send(embed=await error_embed(\"This bot does not have the `kick_members` permission.\"))\n return\n except discord.HTTPException:\n await ctx.send(embed=await error_embed(\"HTTP Error, try again?\"))\n return\n\n desc = await log(\"kicks\", {\n \"Kicked\": member.name,\n \"Kicked by\": ctx.message.author.name,\n \"Reason\": \" \".join(args),\n \"Timestamp\": ctx.message.created_at.now().isoformat()\n })\n\n embed = discord.Embed(title=\"Kicked member\", description=desc, color=0xFFA500)\n channel = bot.get_channel(kick_log)\n await channel.send(embed=embed)\n\n\n@bot.command()\n@is_admin()\nasync def ban(ctx: commands.Context, user: str, *args: str) -> None:\n member = await get_member(bot.get_guild(sr), user)\n if member is None:\n await ctx.send(embed=await error_embed(f\"Unknown user '{user}'\"))\n\n try:\n await bot.get_guild(sr).ban(member, reason=\" \".join(args))\n await ctx.send(embed=await success_embed(f\"Banned '{user}'\"))\n except discord.Forbidden:\n await ctx.send(embed=await error_embed(f\"This bot does not have the `ban_members` permission.\"))\n return\n except discord.HTTPException:\n await ctx.send(embed=await error_embed(f\"HTTP Error, try again?\"))\n return\n\n desc = await log(\"bans\", {\n \"Banned\": member.name,\n \"Banned by\": ctx.message.author.name,\n \"Reason\": \" \".join(args),\n \"Timestamp\": ctx.message.created_at.now().isoformat()\n })\n\n embed = discord.Embed(title=\"Banned member\", description=desc, color=0xCC0000)\n channel = bot.get_channel(ban_log)\n await channel.send(embed=embed)\n\n\nstart_time = datetime.datetime.now()\n\nbot.run(credentials[\"token\"])\n","repo_name":"S0rax/modbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38983778076","text":"import requests\r\nimport parsel\r\nimport time\r\nimport execjs\r\nimport re\r\n\r\nheaders = {\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) \\\r\nAppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\r\n}\r\n\r\n\r\ndef 
get_pages_urls(star_name, page):\r\n target_url = []\r\n for i in range(1, page):\r\n url = 'https://www.pornhub.com/model/' + star_name + '/videos?page=' + str(i)\r\n response = requests.get(url, headers=headers)\r\n sel =parsel.Selector(response.text)\r\n movie_urls = sel.xpath('//*[@id=\"mostRecentVideosSection\"]/li/div/div/div/a/@href').extract()\r\n head = 'https://www.pornhub.com'\r\n for movie_url in movie_urls:\r\n target_url.append(head + movie_url)\r\n return target_url\r\n\r\ndef get_movieurls(target_urls):\r\n for url in target_urls:\r\n response = requests.get(url)\r\n sel = parsel.Selector(response.text)\r\n # 获取执行得所有js\r\n js_source = sel.xpath('//*[@id=\"player\"]/script[1]/text()').extract_first()\r\n name = sel.xpath('//*[@id=\"hd-leftColVideoPage\"]/div[1]/div[3]/h1/span/text()').extract_first()\r\n # print(js_source)\r\n # 截掉后面playerObjList部分\r\n source = js_source.split('playerObjList')[0]\r\n #获得flashvars 脚本函数名\r\n key = re.findall(r'flashvars_\\d+', source)[0]\r\n # 编译js\r\n js = execjs.compile(source)\r\n # 获取 key函数执行后的返回值\r\n data = js.eval(key)\r\n for md in data['mediaDefinitions']:\r\n video_url = md['videoUrl']\r\n quality = md['quality']\r\n _format = md['format']\r\n if _format == \"mp4\":\r\n movie = requests.get(video_url)\r\n with open(r'D:\\pornhub\\porn_movie\\sweet-bunny\\\\' + name + '.mp4', 'wb') as f:\r\n print('电影' + name + \"正在下载\")\r\n f.write(movie.content)\r\n break\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n target_urls = get_pages_urls('sweet-bunny', 2)\r\n print(len(target_urls))\r\n get_movieurls(target_urls)\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\n\r\n# movie_dict = {}\r\n# for name, url in zip(movie_name_list, all_list):\r\n# movie_dict[name] = url\r\n\r\n\r\n\r\n\r\n","repo_name":"KnightLI5/Spyder_Python","sub_path":"pornhub_download/porn_movie_spyder.py","file_name":"porn_movie_spyder.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38287286054","text":"import sys\nfrom collections import deque\n\nN, M, K = map(int, sys.stdin.readline().split())\n\nGRID = [['.' 
for _ in range(M)] for _ in range(N)]\n\nfor i in range(K):\n    r, c = map(int, sys.stdin.readline().split())\n    GRID[r - 1][c - 1] = '#'\n\nvisited = [[False for _ in range(M)] for _ in range(N)]\n\ndx = [1, 0, -1, 0]\ndy = [0, -1, 0, 1]\n\n\ndef bfs(y, x):\n    cnt = 0\n    to_visit = deque([(y, x)])\n    visited[y][x] = True\n    while to_visit:\n        pos_y, pos_x = to_visit.popleft()\n        cnt += 1\n        for d in range(4):\n            next_y, next_x = pos_y + dy[d], pos_x + dx[d]\n            if 0 <= next_y < N and 0 <= next_x < M and not visited[next_y][next_x] and GRID[next_y][next_x] == \"#\":\n                to_visit.append((next_y, next_x))\n                visited[next_y][next_x] = True\n\n    return cnt\n\n\nres = 0\nfor y in range(N):\n    for x in range(M):\n        if GRID[y][x] == \"#\":\n            res = max(res, bfs(y, x))\n\nprint(res)","repo_name":"shiueo/PS","sub_path":"1743.py","file_name":"1743.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"74400163145","text":"try:\n    n = int(input(\"Ingrese un digito entre 1 y 10:\"))\n    \n    if n<1 or n>10:\n        raise ValueError(\"Debe ser entre 1 y 10\")\n    \nexcept ValueError as e:\n    print(e)\n    print(\"Ha ingresado un digito incorrecto\")\nexcept Exception as e:\n    print(\"Ha ocurrido un error: \", e)\n    ","repo_name":"Zynno-Dev/Progra1-Ex1","sub_path":"Compilado Examen 1/Clases/Clase 6/Ejemplos de Clase/Clase 6 - Ejemplo Raise.py","file_name":"Clase 6 - Ejemplo Raise.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"7669076504","text":"from jsonDatabase import *\nfrom profileStats import restOfProfile\nfrom fightingSystem import userStats\ndef guildSignup(id, msg):\n    if msg.content.lower() in ('y', 'yes'):\n        if getDataValue(id, 'coin') >= 100:\n            value = getDataValue(id, 'coin') - 100\n            inserting(id, 'coin', value)\n            inserting(id, 'guild', True)\n            restOfProfile(id)\n            userStats(id)\n            insertingDic(id, 'enemy')\n            return 'Now you are registered!!'\n        else:\n            return \"Looks like you don't have enough\"\n    else:\n        return \"Okay, maybe later then\"","repo_name":"Absolut-AK/YukiChanBot","sub_path":"guild.py","file_name":"guild.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"24805702769","text":"import logging\n# from flask import session, request\nfrom flask_socketio import (Namespace, emit)\n# , join_room, leave_room, close_room,\n#  rooms, disconnect)\nfrom duckomatic.utils.resource import Resource\n\n\nclass Gps(Resource, Namespace):\n\n    def __init__(self, *vargs, **kwargs):\n        \"\"\" Constructor.\n        Initialize the parent classes.\n        \"\"\"\n        super(Gps, self).__init__(*vargs, **kwargs)\n        self._client_count = 0\n\n    def handle_incoming_message(self, topic, data):\n        if self._client_count > 0:\n            logging.debug('%s: Client count: %d, Sending: \\\ntopic: \"%s\", \\\ndata: \"%s\"' %\n                          (self.namespace, self._client_count, topic, data))\n            self.socketio.emit(\n                topic, data, namespace=self.namespace)\n\n    def start(self):\n        self.start_processing_incoming_messages()\n\n    def on_connect(self):\n        self._client_count += 1\n        emit('clients', {'data': 'Client connected',\n                         'count': self._client_count})\n\n    def on_disconnect(self):\n        self._client_count -= 1\n        print('%s: Client disconnected. 
Client count = %d' %\n (self.__class__, self._client_count))\n\n # def on_my_event(self, message):\n # session['receive_count'] = session.get('receive_count', 0) + 1\n # emit('my_response',\n # {'data': message['data'], 'count': session['receive_count']})\n\n # def on_my_broadcast_event(self, message):\n # session['receive_count'] = session.get('receive_count', 0) + 1\n # emit('my_response',\n # {'data': message['data'], 'count': session['receive_count']},\n # broadcast=True)\n\n # def on_join(self, message):\n # join_room(message['room'])\n # session['receive_count'] = session.get('receive_count', 0) + 1\n # emit('my_response',\n # {'data': 'In Gps rooms: ' + ', '.join(rooms()),\n # 'count': session['receive_count']})\n\n # def on_leave(self, message):\n # leave_room(message['room'])\n # session['receive_count'] = session.get('receive_count', 0) + 1\n # emit('my_response',\n # {'data': 'In rooms: ' + ', '.join(rooms()),\n # 'count': session['receive_count']})\n\n # def on_close_room(self, message):\n # session['receive_count'] = session.get('receive_count', 0) + 1\n # emit('my_response', {'data': 'Room ' + message['room']\n # + ' is closing.',\n # 'count': session['receive_count']},\n # room=message['room'])\n # close_room(message['room'])\n\n # def on_my_room_event(self, message):\n # session['receive_count'] = session.get('receive_count', 0) + 1\n # emit('my_response',\n # {'data': message['data'], 'count': session['receive_count']},\n # room=message['room'])\n\n # def on_disconnect_request(self):\n # session['receive_count'] = session.get('receive_count', 0) + 1\n # emit('my_response',\n # {'data': 'Disconnected!', 'count': session['receive_count']})\n # disconnect()\n\n # def on_my_ping(self):\n # emit('my_pong')\n","repo_name":"morgangalpin/duckomatic","sub_path":"duckomatic/api/resources/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21126985029","text":"import os\nimport subprocess\nimport sys\n\n\nINFRA_BOTS_DIR = os.path.dirname(os.path.realpath(__file__))\nSKIA_DIR = os.path.abspath(os.path.join(INFRA_BOTS_DIR, os.pardir, os.pardir))\n\n\ndef test(cmd, cwd):\n try:\n subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT, encoding='utf-8')\n except subprocess.CalledProcessError as e:\n return e.output\n\n\ndef python_unit_tests(train):\n if train:\n return None\n return test(\n [sys.executable, '-u', '-m', 'unittest', 'discover', '-s', '.', '-p',\n '*_test.py'],\n INFRA_BOTS_DIR)\n\n\ndef recipe_test(train):\n cmd = [\n sys.executable, '-u', os.path.join(INFRA_BOTS_DIR, 'recipes.py'), 'test']\n if train:\n cmd.append('train')\n else:\n cmd.append('run')\n return test(cmd, SKIA_DIR)\n\n\ndef gen_tasks_test(train):\n cmd = ['go', 'run', 'gen_tasks.go']\n if not train:\n cmd.append('--test')\n try:\n output = test(cmd, INFRA_BOTS_DIR)\n except OSError:\n return ('Failed to run \"%s\"; do you have Go installed on your machine?'\n % ' '.join(cmd))\n return output\n\n\ndef main():\n train = False\n if '--train' in sys.argv:\n train = True\n\n tests = (\n python_unit_tests,\n recipe_test,\n gen_tasks_test,\n )\n errs = []\n for t in tests:\n err = t(train)\n if err:\n errs.append(err)\n\n if len(errs) > 0:\n print('Test failures:\\n', file=sys.stderr)\n for err in errs:\n print('==============================', file=sys.stderr)\n print(err, file=sys.stderr)\n print('==============================', file=sys.stderr)\n sys.exit(1)\n\n if train:\n 
print('Trained tests successfully.')\n else:\n print('All tests passed!')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"google/skia","sub_path":"infra/bots/infra_tests.py","file_name":"infra_tests.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":8112,"dataset":"github-code","pt":"81"} +{"seq_id":"40062683368","text":"from rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.decorators import api_view, permission_classes, authentication_classes\r\nfrom knox.auth import TokenAuthentication\r\nfrom django.http import JsonResponse\r\nfrom .models import TraineeQuestion, MentorAnswer\r\nfrom accounts.models import Mentors\r\nfrom .serializers import TraineeQuestionSerializer\r\nfrom rest_framework.response import Response\r\nfrom rest_framework import status\r\nfrom backend.custom_auth import isMentor\r\n\r\n\r\n@api_view(['GET'])\r\n@authentication_classes([TokenAuthentication])\r\n@permission_classes([IsAuthenticated, isMentor]) \r\ndef get_unanswered_questions(request):\r\n questions = TraineeQuestion.objects.filter(answered=False).exclude(answered_by=request.user)\r\n serializer = TraineeQuestionSerializer(questions, many=True)\r\n return Response(serializer.data)\r\n \r\n@api_view(['POST'])\r\n@authentication_classes([TokenAuthentication])\r\n@permission_classes([IsAuthenticated, isMentor])\r\ndef add_mentor_answer(request):\r\n question_id = request.data.get('question_id')\r\n try:\r\n question_instance = TraineeQuestion.objects.get(pk=question_id)\r\n question_instance.answered_by.add(request.user)\r\n MentorAnswer.objects.create(\r\n user=request.user,\r\n answer=request.data.get('answer'),\r\n question=question_instance\r\n )\r\n if question_instance.answered_by.count() >= 2:\r\n question_instance.answered = True\r\n question_instance.save()\r\n return Response({'message': 'MentorAnswer created successfully', 'question_id': question_id}, status=status.HTTP_201_CREATED)\r\n except TraineeQuestion.DoesNotExist:\r\n return Response({'error': 'Invalid question ID'}, status=status.HTTP_400_BAD_REQUEST)","repo_name":"darinvi/ATP","sub_path":"backend/mentor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35207440408","text":"import serial \nimport time\nimport pyautogui\nimport sys\n\nArduinoSerial = serial.Serial('COM6',9600)\ntime.sleep(2)\nprint (\"lets do some work\")\ntest=''\n\nwhile 1:\n \n incoming = str (ArduinoSerial.readline()) #read the serial data and print it as line\n \n if incoming!=test:\n \n if 'B_hello' in incoming:\n print ('Hello')\n\n if 'Bye' in incoming:\n print ('Bye')\n if 'Yes' in incoming:\n print ('Yes') \n \n test=incoming \n","repo_name":"wreakhead/Hand-gesture-recognition","sub_path":"3.readword.py","file_name":"3.readword.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"1517436556","text":"from time import time\nimport os\nimport math\n\nfrom src.dict import dictionary\n\nif __name__ == '__main__':\n print(\"-----------------------Test szybkości biblioteki - część jednosegmentowa-----------------------\")\n for i in [100, 300, 600, 900]:\n filename = \"test_data/test_file_\" + str(i) + \".txt\"\n print(\"===============================================================================================\")\n print(\"Rozmiar testowanego 
pliku: \" + str(\n math.ceil(os.stat(filename).st_size / 1024)) + \"KB\")\n\n number_of_lines = 0\n with open(filename, 'r', encoding=\"utf8\") as f:\n for line in f:\n number_of_lines += 1\n print(\"Ilość wpisów w testowanym pliku: \", number_of_lines)\n\n start_of_building = time()\n test_dict = dictionary.Dictionary([filename])\n end_of_building = time()\n print(\"Czas budowy struktury: \" + str(end_of_building - start_of_building) + \"s\")\n print(\"Średni czas trwania przetworzenia jednego wpisu: \"\n + str((end_of_building - start_of_building) / number_of_lines))\n\n start_get_parent = time()\n test_dict.get_parent(\"Gdański\")\n end_get_parent = time()\n print(\"Czas odpytania o rodzica: \" + str(end_get_parent - start_get_parent) + \"s\")\n\n start_get_children = time()\n test_dict.get_parent(\"Gdański\")\n end_get_children = time()\n print(\"Czas odpytania o dziecko: \" + str(end_get_children - start_get_children) + \"s\")\n","repo_name":"kaszubab/Grammatical-Dictionary-of-Polish","sub_path":"src/test/test_dictionary_times.py","file_name":"test_dictionary_times.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9400960297","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/12/10 21:27\n# @Author : Jiefan\n\nclass Solution:\n def combinationSum2(self, candidates: 'List[int]', target: 'int') -> 'List[List[int]]':\n candidates.sort()\n def dfs(idx, t, cur):\n if t == 0:\n res.append(cur)\n return\n if t < 0: return\n for i in range(idx, len(candidates)):\n if i > idx and candidates[i] == candidates[i - 1]: continue # avoid duplication\n dfs(i + 1, t - candidates[i], cur + [candidates[i]])\n res = []\n dfs(0, target, [])\n return res\n\n'''\nGiven an integer array with all positive numbers and no duplicates, find the number of possible combinations that add up to a positive integer target.\n\nExample:\n\nnums = [1, 2, 3]\ntarget = 4\n\nThe possible combination ways are:\n(1, 1, 1, 1)\n(1, 1, 2)\n(1, 2, 1)\n(1, 3)\n(2, 1, 1)\n(2, 2)\n(3, 1)\n\nNote that different sequences are counted as different combinations.\n\nTherefore the output is 7.\n'''\n# this is different from coin change 2, becaue the order matters here, but for coin change 2, the result of\n# the given example is 4\n\ndef combinationSum4(nums, target: int) -> int:\n dp = [0] * (target + 1)\n dp[0] = 1\n for i in range(1, target + 1):\n for num in nums:\n if i >= num:\n dp[i] += dp[i - num]\n return dp[-1]\n\n","repo_name":"Jason003/interview","sub_path":"linkedin/Combination Sum.py","file_name":"Combination Sum.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"30558337538","text":"# 세준이는 양수와 +, -, 그리고 괄호를 가지고 식을 만들었다. 그리고 나서 세준이는 괄호를 모두 지웠다.\n# 그리고 나서 세준이는 괄호를 적절히 쳐서 이 식의 값을 최소로 만들려고 한다.\n# 괄호를 적절히 쳐서 이 식의 값을 최소로 만드는 프로그램을 작성하시오.\n\n# 첫째 줄에 식이 주어진다. 식은 ‘0’~‘9’, ‘+’, 그리고 ‘-’만으로 이루어져 있고, 가장 처음과 마지막 문자는 숫자이다.\n# 그리고 연속해서 두 개 이상의 연산자가 나타나지 않고, 5자리보다 많이 연속되는 숫자는 없다.\n# 수는 0으로 시작할 수 있다. 입력으로 주어지는 식의 길이는 50보다 작거나 같다.\n\n# 첫째 줄에 정답을 출력한다.\n\n# https://www.acmicpc.net/problem/1541\n\n# 문제의 요지는 -과 - 사이의 값을 모두 더한다음 전체를 빼주는 것이었다. 
파악하고나서는 쉬운데 알아내기까지 시간이 좀 걸릴 것 같다.\n\nstr = input()\nstrArr = str.split('-')\nsum = []\n\nfor num in strArr:\n tempSum = 0\n numArr = num.split('+')\n for target in numArr:\n tempSum += int(target)\n sum.append(tempSum)\nresult = sum[0]\n\nfor num in sum[1:]:\n result -= num\nprint(result)","repo_name":"Zabee52/study","sub_path":"algorithm/python/algorithm/baekjoon/lost_bracket1541.py","file_name":"lost_bracket1541.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6644915416","text":"from __future__ import annotations\n\nimport logging\nimport os\nfrom logging import FileHandler, LogRecord, StreamHandler\n\nfrom rich.console import Console\nfrom rich.text import Text\n\n\nclass RichConsoleHandler(StreamHandler):\n \"\"\"\n Logging handler for rich console output.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n self.console = Console()\n\n def emit(self, record: LogRecord) -> None:\n try:\n self.console.print(self.format(record)) # type: ignore\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n\nclass RichFileHandler(FileHandler):\n \"\"\"\n Logging handler for file output with stripped formatting.\n \"\"\"\n\n def __init__(self, filename: str) -> None:\n super().__init__(filename)\n\n def emit(self, record: LogRecord) -> None:\n record.msg = Text.from_markup(str(record.msg)).plain\n return super().emit(record)\n\n\nLOG_LEVEL = logging.DEBUG if os.getenv('RUN_MODE', '').lower() == 'debug' else logging.INFO\n\nlog = logging.getLogger('main')\nlog.setLevel(LOG_LEVEL)\n","repo_name":"adambelniak/WindForecast","sub_path":"src/wind_forecast/util/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"9962121274","text":"from flask import Blueprint, render_template, request, redirect, flash, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models.user import Users\nfrom db import db\n\ncrud = Blueprint(\"routes\", __name__)\n\n\n@crud.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n@crud.route(\"/create\", methods=['POST'])\ndef create():\n nome = request.form.get('name')\n age = request.form.get('age')\n email = request.form.get('email')\n\n usuario = Users(\n nome=nome,\n age=age,\n email=email\n )\n db.session.add(usuario)\n db.session.commit()\n db.session.close()\n\n return redirect(url_for(\"routes.list\"))\n\n\n@crud.route(\"/list\")\ndef list():\n users = Users.query.all()\n return render_template(\"users.html\", users=users, len=len(users))\n\n\n@crud.route(\"/delete/\", methods=['GET', 'POST'])\ndef delete(email):\n data = Users.query.filter(Users.email == email).first()\n db.session.delete(data)\n db.session.commit()\n return redirect(url_for(\"routes.list\"))\n\n\n@crud.route(\"/update/\", methods=[\"POST\", \"GET\"])\ndef update(email):\n user = Users.query.filter(Users.email == email).first()\n\n user.nome = request.form.get('name')\n user.age = request.form.get('age')\n user.email = request.form.get('email')\n\n db.session.commit()\n\n return redirect(url_for(\"routes.list\"))\n","repo_name":"ImBard/CRUD-with-FLASK-n-SQLAlchemy","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23890642690","text":"\"\"\"\r\nLab 1, 
exercise 1: Random fasta and fastq file generator\r\nBioinformatics @ Politecnico di Torino\r\nAuthor: Silvia Giammarinaro\r\n\r\n\"\"\"\r\n\r\n# python ex1.py output_file nReads probA probT probC probG\r\n# Output file extension could be fa (fasta) or fq (fastq)\r\n\r\n# Example\r\n# python ex1.py output_ex1.fa 100 30 30 30 10\r\n\r\nimport numpy as np\r\n\r\nclass generator:\r\n\r\n    def __init__(self, output_file, nReads, probA, probT, probC, probG):\r\n        self.__output_file = output_file\r\n        self.__nReads = nReads\r\n        self.__probs = np.array([probA, probT, probC, probG])\r\n        self.__probs = self.__probs/self.__probs.sum()\r\n        self.__bases = np.array(['A', 'T', 'C', 'G'])\r\n    \r\n    def __getRandomBase(self):\r\n        return np.random.choice(self.__bases, p = self.__probs)\r\n    \r\n    def __getRandomQualityScore(self):\r\n        return chr(np.random.choice(np.arange(33, 127))) #Sanger format\r\n    \r\n    def generateFasta(self, bp):\r\n        with open(self.__output_file, 'w') as f:\r\n            for i in range(self.__nReads):\r\n                f.write('>' + str(i) + '\\n')\r\n                for _ in range(bp):\r\n                    f.write(self.__getRandomBase())\r\n                f.write('\\n')\r\n    \r\n    def generateFastq(self, bp):\r\n        with open(self.__output_file, 'w') as f:\r\n            for i in range(self.__nReads):\r\n                f.write('@' + str(i) + '\\n')\r\n                for _ in range(bp):\r\n                    f.write(self.__getRandomBase())\r\n                f.write('\\n')\r\n                f.write('+' + str(i) + '\\n')\r\n                for _ in range(bp):\r\n                    f.write(self.__getRandomQualityScore())\r\n                f.write('\\n')\r\n\r\nimport sys \r\n\r\nif __name__ == \"__main__\":\r\n    \r\n    output_file = sys.argv[1]\r\n    nReads = int(sys.argv[2])\r\n    probA = float(sys.argv[3])\r\n    probT = float(sys.argv[4])\r\n    probC = float(sys.argv[5])\r\n    probG = float(sys.argv[6])\r\n    fg = generator(output_file, nReads, probA, probT, probC, probG)\r\n    bp = 50\r\n    \r\n    if sys.argv[1][-1] == 'a':\r\n        fg.generateFasta(bp)\r\n    \r\n    elif sys.argv[1][-1] == 'q':\r\n        fg.generateFastq(bp)\r\n\r\n","repo_name":"sigeek/bioinformatics-labs","sub_path":"lab1/ex1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"33617152846","text":"\"\"\"Prototyping code for rendering function definitions, invocations, and results.\n\nTypes are simplified for now to `str`.\n\nWe should actually support something like pydantic or jsonschema for the types, so\nwe can expand them recursively for nested types.\n\"\"\"\nimport abc\nfrom typing import Any, List, Optional\n\nfrom typing_extensions import NotRequired, TypedDict\n\n\nclass Parameter(TypedDict):\n    \"\"\"Representation for a parameter.\"\"\"\n\n    name: str\n    type: str\n    description: str\n\n\nclass Arguments(TypedDict):\n    \"\"\"Arguments are passed to a function during function invocation.\"\"\"\n\n    name: Optional[str]\n    value: Any\n\n\nclass ReturnValue(TypedDict):\n    \"\"\"Representation for a return value of a function call.\"\"\"\n\n    type: str\n    description: NotRequired[str]\n\n\nclass FunctionDefinition(TypedDict):\n    \"\"\"Representation for a function.\"\"\"\n\n    name: str\n    description: str  # Function description\n    parameters: List[Parameter]\n    return_value: ReturnValue\n\n\nclass FunctionInvocation(TypedDict):\n    \"\"\"Representation for a function invocation.\"\"\"\n\n    id: NotRequired[str]\n    name: str\n    arguments: List[Arguments]\n\n\nclass FunctionResult(TypedDict):\n    \"\"\"Representation for a function result.\"\"\"\n\n    id: NotRequired[str]\n    name: str\n    result: Optional[str]\n    error: Optional[str]\n\n\nclass 
Visitor(abc.ABC):\n @abc.abstractmethod\n def visit_function_definition(self, function_definition: FunctionDefinition) -> str:\n \"\"\"Render a function.\"\"\"\n\n @abc.abstractmethod\n def visit_function_definitions(\n self, function_definitions: List[FunctionDefinition]\n ) -> str:\n \"\"\"Render a function.\"\"\"\n\n @abc.abstractmethod\n def visit_function_invocation(self, function_invocation: FunctionInvocation) -> str:\n \"\"\"Render a function invocation.\"\"\"\n\n @abc.abstractmethod\n def visit_function_result(self, function_result: FunctionResult) -> str:\n \"\"\"Render a function result.\"\"\"\n\n\nclass AstPrinter(Visitor):\n \"\"\"Print the AST.\"\"\"\n\n\nclass XMLEncoder(AstPrinter):\n def visit_function_definition(self, function_definition: FunctionDefinition) -> str:\n \"\"\"Render a function.\"\"\"\n parameters_lines = []\n\n for parameter in function_definition[\"parameters\"]:\n parameters_lines.extend(\n [\n \"\",\n f\"{parameter['name']}\",\n f\"{parameter['type']}\",\n f\"{parameter['description']}\",\n \"\",\n ]\n )\n lines = [\n \"\",\n f\"{function_definition['name']}\",\n \"\",\n f\"{function_definition['description']}\",\n \"\",\n \"\",\n *parameters_lines,\n \"\",\n \"\",\n f\"{function_definition['return_value']['type']}\",\n ]\n if function_definition[\"return_value\"].get(\"description\"):\n lines.append(\n f\"{function_definition['return_value']['description']}\"\n f\"\"\n )\n\n lines.extend([\"\", \"\"])\n return \"\\n\".join(lines)\n\n def visit_function_definitions(\n self, function_definitions: List[FunctionDefinition]\n ) -> str:\n \"\"\"Render a function.\"\"\"\n strs = [\n self.visit_function_definition(function_definition)\n for function_definition in function_definitions\n ]\n return \"\\n\" + \"\\n\".join(strs) + \"\\n\"\n\n def visit_function_invocation(self, invocation: FunctionInvocation) -> str:\n \"\"\"Render a function invocation.\"\"\"\n arguments_as_strings = [\n \"\\n\"\n f\"{argument['name']}\\n\"\n f\"{argument['value']}\\n\"\n \"\\n\"\n for argument in invocation[\"arguments\"]\n ]\n lines = [\"\"]\n\n if invocation.get(\"id\"):\n lines.append(f\"{invocation['id']}\")\n\n lines.extend(\n [\n f\"{invocation['name']}\\n\"\n \"\\n\"\n f\"{''.join(arguments_as_strings)}\" # Already includes trailing newline\n \"\\n\"\n \"\"\n ]\n )\n return \"\\n\".join(lines)\n\n def visit_function_result(self, function_result: FunctionResult) -> str:\n \"\"\"Render a function result.\"\"\"\n lines = [\n \"\",\n ]\n\n if function_result.get(\"id\"):\n lines.append(f\"{function_result['id']}\")\n\n lines.append(f\"{function_result['name']}\")\n\n if function_result[\"error\"]:\n lines.extend(\n [\n f\"{function_result['error']}\",\n ]\n )\n else:\n lines.append(\n f\"{function_result['result']}\",\n )\n\n lines.append(\"\")\n\n return \"\\n\".join(lines)\n\n\nclass TypeScriptEncoder(AstPrinter):\n def visit_function_definition(self, function_definition: FunctionDefinition) -> str:\n \"\"\"Render a function.\"\"\"\n parameters_as_strings = [\n f\"{parameter['name']}: {parameter['type']}\"\n for parameter in function_definition[\"parameters\"]\n ]\n # Let's use JSdoc style comments\n # First the function description\n lines = [\n f\"// {function_definition['description']}\",\n # Then the parameter descriptions\n *[\n f\"// @param {parameter['name']} {parameter['description']}\"\n for parameter in function_definition[\"parameters\"]\n ],\n # Then the return value description\n f\"// @returns {function_definition['return_value']['description']}\",\n # Then the 
function definition\n f\"function {function_definition['name']}(\"\n f\"{', '.join(parameters_as_strings)}): \"\n f\"{function_definition['return_value']['type']};\",\n ]\n\n # finally join\n function = \"\\n\".join(lines)\n return function\n\n def visit_function_definitions(\n self, function_definitions: List[FunctionDefinition]\n ) -> str:\n \"\"\"Render a function.\"\"\"\n strs = [\n self.visit_function_definition(function_definition)\n for function_definition in function_definitions\n ]\n return \"\\n\\n\".join(strs)\n\n def visit_function_invocation(self, invocation: FunctionInvocation) -> str:\n \"\"\"Render a function invocation.\"\"\"\n arguments_as_strings = [\n f\"{argument['name']}: {argument['value']}\"\n for argument in invocation[\"arguments\"]\n ]\n lines = [f\"{invocation['name']}(\" f\"{', '.join(arguments_as_strings)});\"]\n return \"\\n\".join(lines)\n\n def visit_function_result(self, function_result: FunctionResult) -> str:\n \"\"\"Render a function result.\"\"\"\n lines = []\n if function_result[\"error\"]:\n lines.append(f\"ERROR: {function_result['error']}\")\n else:\n lines.append(f\"> {function_result['result']}\")\n if function_result.get(\"id\"):\n lines.append(f\"// ID: {function_result['id']}\")\n return \"\\n\".join(lines)\n","repo_name":"langchain-ai/langchain-benchmarks","sub_path":"langchain_benchmarks/tool_usage/agents/experimental/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":7619,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"81"} +{"seq_id":"6470635692","text":"#!/usr/bin/env python\n\nfrom scipy.interpolate import interp1d\nfrom pylab import *\nfrom scipy.signal import savgol_filter\n\n\ndef detect_step(xs, threshold=5.0, width=10, smoothing = 0.7):\n from scipy.signal import find_peaks_cwt\n detected_steps = zeros(len(xs))\n detected_steps_binary = zeros(len(xs))\n detected_steps_height = zeros(len(xs))\n total_smoothing_weight = 0\n for center in arange(width, len(xs) - width):\n sum_before = 0\n sum_from = 0\n for i in arange(center - width, center):\n sum_before *= smoothing\n sum_before += xs[i]\n total_smoothing_weight *= smoothing\n total_smoothing_weight += 1\n # for i in arange(center, center + width): <- in reverse\n for i in arange(center + width -1, center -1, -1):\n sum_from *= smoothing\n sum_from += xs[i]\n detected_steps[center] = float(sum_from - sum_before)\n \n current_max = 0\n current_max_index = 0\n for i, x in enumerate(detected_steps):\n if (abs(x) > threshold):\n if (abs(x) > abs(current_max)):\n current_max_index = i\n current_max = x\n else:\n # end of over threshold series if current_max != 0\n if current_max > 0:\n detected_steps_binary[current_max_index] = 1\n detected_steps_height[current_max_index] = mean([xs[j] for j in range(current_max_index + width/2, current_max_index+width)]) - mean([xs[j] for j in range(current_max_index - width, current_max_index-width/2)])\n elif current_max < 0:\n detected_steps_binary[current_max_index] = -1\n detected_steps_height[current_max_index] = mean([xs[j] for j in range(current_max_index + width/2, current_max_index+width)]) - mean([xs[j] for j in range(current_max_index - width, current_max_index-width/2)])\n current_max = 0\n current_max_index = 0\n\n return detected_steps_binary, detected_steps_height\n\n##Smoothing data\nt = df_baro.index - df_baro.index[0]\nt = [_t.seconds for _t in t]\ndf_baro['t'] = t\n\nwindow_size, poly_order = 11, 3\nitp = interp1d(t, df_baro['alt'], 
kind='linear')\ndf_baro['alt_fil'] = savgol_filter(itp(t), window_size, poly_order)\n\n\ndf_baro['alt_steps'], df_baro['alt_steps_height'] = detect_step(df_baro['alt'], width=DATA[DATASET]['detect_step_width'])\ndf_baro['alt_steps'] = 1 * df_baro['alt_steps']\n\n","repo_name":"thehackersgroup/hack","sub_path":"analyze_baro.py","file_name":"analyze_baro.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31713843258","text":"# Write your solution here\n# You can test your function by calling it within the following block\n\ndef first_word(word):\n\n i = word.find(\" \")\n return word[:i]\n\ndef second_word(word):\n\n i = word.find(\" \")\n pord = word[i+1:]\n i = pord.find(\" \")\n if i == -1:\n return pord\n else:\n return pord[:i]\n \ndef last_word(word):\n i = \"\"\n o = -1\n new = \"\"\n while i != \" \":\n \n i = word[o]\n o -= 1\n new += i\n x = \"\"\n for i in range(len(new)):\n \n x += new[(1+i)*(-1)]\n return (x[1:])\n\n\nif __name__ == \"__main__\":\n sentence = \"first second\"\n print(first_word(sentence))\n print(second_word(sentence))\n print(last_word(sentence))","repo_name":"DavidRanland/misc-code","sub_path":"python oppgaver/mooc-programming-22/part04-11_first_second_last/src/first_second_last.py","file_name":"first_second_last.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23173814270","text":"import controller\nfrom models import *\nfrom helpers.utils import *\n\ndef track(sender, message):\n\n parsed_message = split_message(message)\n\n if is_track_message_format_valid(parsed_message) == False:\n raise UserInputException('Please enter valid command. eg: /track xci lesser/greater 500')\n if is_track_message_input_valid(parsed_message) == False:\n raise UserInputException('Please enter valid command. eg: /track xci lesser/greater 500')\n\n index_name, operator, target_price = extract_track_message(parsed_message)\n\n if controller.index_statistics.does_index_exist(index_name) == False:\n raise UserInputException(\"Index {} Not Found\".format(index_name))\n if index_name == \"xj1\" or index_name == \"xj3\" or index_name ==\"xj4\":\n raise UserInputException(\"Oops index is not available yet. Stay tuned.\")\n\n date, time = get_current_date_time()\n\n controller.index_tracker.add_index_tracker(index_name, operator, target_price, sender, date, time)\n tracker = Tracker(index_name, operator, target_price, sender, date, time)\n return tracker\n\ndef notify():\n notifications = []\n trackers = controller.index_tracker.get_all_trackers()\n\n for tracker in trackers:\n index = controller.index_statistics.get_latest_index(tracker.index_name)\n target_price = tracker.target_price\n operator = tracker.operator\n\n if (operator == \"lesser\"):\n if ( index.price < target_price ):\n notifications.append(\"Hey @{}, the index price for {} is now lesser than ${}. Make a move!\".format(tracker.sender, tracker.index_name, tracker.target_price))\n controller.index_tracker.delete_index_tracker(tracker.index_name, tracker.operator, tracker.target_price, tracker.sender)\n elif (operator == \"greater\"):\n if ( index.price > target_price ):\n notifications.append(\"Hey @{}, the index price for {} is now greater than ${}. 
Make a move!\".format(tracker.sender, tracker.index_name, tracker.target_price))\n controller.index_tracker.delete_index_tracker(tracker.index_name, tracker.operator, tracker.target_price, tracker.sender)\n\n return notifications\ndef extract_track_message(parsed_message):\n index_name = parsed_message[1]\n operator = parsed_message[2]\n target_price = float(parsed_message[3])\n\n return index_name, operator, target_price\n\n\ndef is_track_message_input_valid(parsed_message: list):\n index_name = parsed_message[1]\n operator = parsed_message[2]\n target_price = parsed_message[3]\n if operator == \"lesser\" or operator == \"greater\":\n if is_float(target_price):\n return True\n\n return False\n\ndef is_track_message_format_valid(parsed_message: list):\n if len(parsed_message) == 4:\n return True\n return False","repo_name":"m3elabs/Xchange-TG","sub_path":"processes/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"43514859722","text":"# takes in pop size, returns networks\n# takes in ids => fitness dict. Returns new population\n# creates species and networks\nfrom network import Network\nfrom species import Species\nimport math\nimport numpy\n\nclass Population:\n def __init__(self, size, ins, outs, recurrent):\n self.size = size\n self.networks = []\n self.max_id = 0\n self.max_species_id = 1\n self.generation = 0\n self.cull = int(0.5 * self.size)\n for i in range(size):\n self.networks.append(\n Network(ins, outs, recurrent, self.max_id, {\"neurons\": [[], []], \"connections\": [], \"recurrents\": []}, True, False, 0, (255, 0, 0)))\n self.max_id += 1\n self.species = [Species(0, self.networks[0].dna, (255, 0, 0))]\n # self.neuron_ids = set()\n self.this_to_add = 0\n self.fitness_sum = 0\n self.chosen = dict()\n self.divergence_threshold = 1.5\n\n def get_pop(self):\n return self.networks\n\n def next_gen(self, pop):\n self.chosen.clear()\n self.generation += 1\n string = \"////////////\" + str(self.generation) + \"///////////////\"\n if len(self.species) > 1:\n string += \" \" + str(len(self.species[1].members))\n print(string)\n self.speciate(pop)\n self.extinct_species(0)\n self.cull_and_replace()\n self.extinct_species(0)\n self.networks = []\n for s in self.species:\n for i in s.chosen:\n self.chosen[str(i)] = s.id\n self.networks += s.members\n for p in self.networks:\n p.reset()\n\n # self.get_unique_ids()\n\n return self.networks\n\n def cull_and_replace(self):\n fitness_sum = 0\n pass_on_extra = 0\n\n starting_id = self.size * self.generation\n\n for s in self.species:\n fitness = s.set_fitness()\n fitness_sum += fitness\n\n self.fitness_sum = fitness_sum\n\n pop_track = 0\n\n ratio = 0\n ratios = []\n\n self.species = sorted(self.species, key=lambda x: x.fitness, reverse=True)\n\n for i in range(len(self.species) - 1, -1, -1):\n s = self.species[i]\n ratio = 0.25 / (1.25**len(self.species) - 1) if ratio == 0 else ratio * 1.25\n ratios.append(ratio)\n to_add = math.floor(self.size * ratio) + pass_on_extra\n\n if len(s.members) > 5:\n pass_on_extra = s.birth(to_add, True, starting_id)\n else:\n pass_on_extra = s.birth(to_add, False, starting_id)\n starting_id += to_add\n pop_track += to_add\n\n if pop_track < self.size:\n to_add = self.size - pop_track\n self.species[0].birth_extra(to_add, starting_id)\n starting_id += to_add\n\n def speciate(self, pop):\n for s in self.species:\n s.members = []\n self.species = sorted(self.species, key=lambda x: x.fitness, 
reverse=True)\n for p in range(len(pop) - 1, -1, -1):\n closest = 9999\n species = None\n for i in range(len(self.species)):\n s = self.species[i] \n check_stale = False if i <= 5 else True\n how_close = s.matches_species(pop[p], check_stale)\n if how_close < closest:\n species = s\n closest = how_close\n if closest > self.divergence_threshold:\n self.species.append(Species(self.max_species_id, pop[p].dna, (numpy.random.randint(255),numpy.random.randint(255),numpy.random.randint(255))))\n self.max_species_id += 1\n self.species[-1].members.append(pop[p])\n else:\n species.members.append(pop[p])\n\n def extinct_species(self, min):\n for i in range(len(self.species) - 1, -1, -1):\n if len(self.species[i].members) <= min:\n print(\"Species \" + str(self.species[i].id) + \"went extinct :/\")\n del self.species[i]\n\n def get_unique_ids(self):\n self.neuron_ids.clear()\n for n in self.networks:\n self.neuron_ids.update(n.neuron_ids)\n","repo_name":"imconfusednow/neat","sub_path":"population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19768987029","text":"import os,sys\n\nipt_file = sys.argv[1] \n\nf = open(ipt_file,'r')\nraw_Words = []\nlines = f.readlines()\nfor line in lines:\n\tline = line.split('\\n')[0]\n\traw_Words.extend(line.split(' '))\nf.close() \n\n#print(raw_Words)\n#idxs = sorted(range(len(raw_Words)), key=lambda k: raw_Words[k])\n#Sorted_Wrd = [ raw_Words[s] for s in idxs ] \n\nmy_dict = {i:raw_Words.count(i) for i in raw_Words}\nKeys = list(my_dict.keys())\n#print(Keys)\n#exit(1)\n\nkey_p = [ raw_Words.index(key) for key in Keys ] \nmp = sorted(range(len(key_p)), key=lambda k: key_p[k])\n\nf = open(\"Q1.txt\",'w')\nfor i in range(len(mp)) :\n\t#print(Keys[mp[i]])\n\tif i == len(mp) - 1:\n\t\tf.write(\"%s %d %d\"%(Keys[mp[i]],i,my_dict[Keys[mp[i]]]))\n\telse:\n\t\tf.write(\"%s %d %d\\n\"%(Keys[mp[i]],i,my_dict[Keys[mp[i]]]))\n\t\n\t#print(\"%s %d %d\\n\",key,my_dict[key],\nf.close()\n","repo_name":"kaihsin/ML2017FALL","sub_path":"hw0/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23074095793","text":"def balanceIndex(array):\n\n sum_right = 0\n sum_left = sum(array[1:])\n if sum_left == 0:\n return 0\n for i in range(1,len(array)):\n sum_right += array[i-1]\n sum_left-= array[i]\n if sum_left == sum_right:\n return i\n\n return -1\n\n\n\n\n\n\n\narray = [0,9,-8,2,7,1,11,-2,1]\n\nprint(balanceIndex(array))\n","repo_name":"Dimitri-Kfoury/AlgoExpertQuestions","sub_path":"Assessment_3/Question_2.py","file_name":"Question_2.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3767314897","text":"\"\"\"Contains helpers for requests tests.\"\"\"\n\nfrom tasksapi.constants import DOCKER\n\n\nTEST_CONTAINER_TASK_TYPE_DICT = dict(\n name=\"my-task-type\",\n description=\"Fantastic task type\",\n container_image=\"mwiens91/hello-world\",\n container_type=DOCKER,\n command_to_run=\"/app/hello_world.py\",\n logs_path=\"/logs/\",\n results_path=\"/results/\",\n environment_variables=[\"HOME\"],\n required_arguments=[\"name\"],\n required_arguments_default_values={\"name\": \"AzureDiamond\"},\n)\n\nTEST_EXECUTABLE_TASK_TYPE_DICT = dict(\n name=\"my-task-type\",\n description=\"Fantastic task type\",\n command_to_run=\"true\",\n 
environment_variables=[\"HOME\"],\n required_arguments=[\"name\"],\n required_arguments_default_values={\"name\": \"AzureDiamond\"},\n)\n\nTEST_TASK_QUEUE_DICT = dict(\n name=\"my-task-queue\", description=\"Fantastic task queue\", whitelists=[1]\n)\n\nTEST_TASK_WHITELIST_DICT = dict(\n name=\"my-task-whitelist\",\n description=\"Fantastic task whitelist\",\n whitelisted_container_task_types=[1],\n whitelisted_executable_task_types=[1],\n)\n","repo_name":"saltant-org/saltant","sub_path":"tasksapi/tests/requests_tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"26679623930","text":"from django.db import models\nfrom django.core.validators import MaxLengthValidator\n# Create your models here.\n\n\nclass Userdata(models.Model):\n ROLE_CHOICES = (\n ('student','student'),\n ('faculty','faculty'),\n ('admin','admin'),\n )\n email = models.CharField(max_length=100,primary_key=True)\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n userrole = models.CharField(max_length=20,choices=ROLE_CHOICES,default=\"student\")\n\n\n def __str__(self):\n return self.email\n\nclass Cohort(models.Model):\n id = models.AutoField(primary_key=True,editable=True)\n origin=models.BooleanField(default=False)\n title = models.CharField(max_length=200,validators=[MaxLengthValidator(100)])\n email = models.ForeignKey(Userdata, on_delete=models.CASCADE,default='0',db_constraint=False)\n \n\n def __str__(self):\n return self.title\n\nclass Subcohort(models.Model):\n cohort_id=models.ForeignKey(Cohort,on_delete=models.CASCADE,default=0,db_constraint=False,related_name='subcohortid')\n parent_id=models.ForeignKey(Cohort,on_delete=models.CASCADE,default=0,db_constraint=False,related_name='subparentid')\n\nclass Cohortpdfs(models.Model):\n id = models.AutoField(primary_key=True,editable=True)\n title= models.CharField(max_length=200,validators=[MaxLengthValidator(100)])\n cohort_id=models.ForeignKey(Cohort,on_delete=models.CASCADE,default=0,db_constraint=False)\n content_date = models.DateField( auto_now_add=True, blank=True)\n content_time = models.TimeField( auto_now_add=True, blank=True)\n qn_published = models.BooleanField(default=False)\n\nclass Cohortstudent(models.Model):\n email = models.ForeignKey(Userdata, on_delete=models.CASCADE)\n cohort=models.ForeignKey(Cohort,on_delete=models.CASCADE,default=0,db_constraint=False)\n\nclass facedetails(models.Model):\n email = models.CharField(max_length=100)\n fullname=models.CharField(max_length=200)\n\nclass readattempt(models.Model):\n email = models.CharField(max_length=100)\n pdf = models.IntegerField()\n total = models.IntegerField()\n onscreen = models.IntegerField()\n sus = models.IntegerField()\n\n\nclass Question(models.Model):\n type=models.CharField(max_length=20)\n cohort=models.ForeignKey(Cohort,on_delete=models.CASCADE)\n source = models.IntegerField(default=0)\n\nclass Mcq(models.Model):\n question=models.ForeignKey(Question,on_delete=models.CASCADE)\n question_title=models.CharField(max_length=200, default=\"-\")\n option1=models.CharField(max_length=100)\n option2=models.CharField(max_length=100)\n option3=models.CharField(max_length=100)\n option4=models.CharField(max_length=100)\n correct_answer=models.CharField(max_length=100)\n\n\nclass Cq(models.Model):\n question=models.ForeignKey(Question,on_delete=models.CASCADE)\n question_title=models.CharField(max_length=200, 
default=\"-\")\n correct_answer=models.CharField(max_length=600)\n\nclass quizattempt(models.Model):\n email= models.ForeignKey(Userdata, on_delete=models.CASCADE,default='0',db_constraint=False)\n source = models.IntegerField(default=0)\n starttime = models.FloatField(default=0.0)\n\nclass Answer(models.Model):\n question=models.ForeignKey(Question,on_delete=models.CASCADE)\n email= models.ForeignKey(Userdata, on_delete=models.CASCADE,default='0',db_constraint=False)\n studentans = models.CharField(max_length=600)\n source = models.IntegerField(default=0)\n\nclass Evaluate(models.Model):\n email= models.ForeignKey(Userdata, on_delete=models.CASCADE,default='0',db_constraint=False)\n question=models.ForeignKey(Question,on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n source = models.IntegerField(default=0)\n\nclass Result(models.Model):\n email= models.ForeignKey(Userdata, on_delete=models.CASCADE,default='0',db_constraint=False)\n point = models.IntegerField(default=0)\n source = models.IntegerField(default=0)\n\nclass GeeksModel(models.Model):\n \n # fields of the model\n title = models.CharField(max_length = 200)\n description = models.TextField()\n \n # renames the instances of the model\n # with their title name\n def __str__(self):\n return self.title","repo_name":"redwanalirafi/SmartEdu-A-smart-online-based-education-platform","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10867895389","text":"## Definir número ideal de clusters\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\n\r\nData = pd.read_csv('C:/Users/LUCAS MATHEUS/Desktop/TCC/DADOS/DatasetAlunosEvasao.csv')\r\n\r\nX = np.array(Data)\r\n\r\nwcss = []\r\n\r\nfor i in range(1, 11):\r\n kmeans = KMeans(n_clusters=i, init='k-means++', random_state=0)\r\n kmeans.fit(X)\r\n wcss.append(kmeans.inertia_)\r\n\r\nplt.plot(range(1, 11), wcss)\r\nplt.title('Método do cotovelo')\r\nplt.xlabel('Número de clusters')\r\nplt.ylabel('WCSS')\r\nplt.show()\r\n\r\n","repo_name":"LucasMatheusSA/DataAnalytics-Python-R","sub_path":"Comandos e Scripts/parte1.py","file_name":"parte1.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30772722463","text":"import os\nos.chdir(\"../\")\n\nfrom config import load_config\n\nconfig = load_config.config()\nfrom functions.estacionaridad import estacionaridadYCointegracion\nimport datetime as dt\nfrom utils.dataframes import work_dataframes\nfrom plots import plots_iniciales\nimport pandas as pd\n\nif __name__ == \"__main__\":\n #analisis de los seleccionados para energias\n data = pd.read_csv(\"./data/raw/energies/dayly_energy.csv\", index_col=0)\n\n\n data.index = pd.to_datetime(data.index)\n fecha_minima = config[\"series_temporales\"][\"fecha_minima\"]\n fecha_minima = dt.datetime.strptime(fecha_minima, \"%Y-%m-%d\")\n data = data.loc[data.index > fecha_minima]\n dataframe_resultados = pd.DataFrame(columns=[\"accion\", \"fecha\", \"hurst_rs\",\"hurst_dma\",\"hurst_dsod\",\"hurst_diffusion\", \"adf_test\", \"lambda\"])\n dataframe_resultados.set_index([\"accion\", \"fecha\"], inplace=True)\n\n for column in data.columns:\n data2 = data.loc[:, [column]].dropna()\n fechas = work_dataframes.get_lookbacks_(data2, 4)\n 
plots_iniciales.plot_serie_temporal(data2, column, column, num_plots=4,\n archivo=\"reports/energies/\" + column + \".jpg\")\n\n\n\n for fecha in fechas:\n\n data_aux = data2.loc[data2.index > fecha]\n resultadosEst = estacionaridadYCointegracion.analisis_estacionaridad(data_aux.loc[:, column])\n print(data_aux.tail())\n array=[]\n for h in resultadosEst[\"hurst\"]:\n array.append(h)\n array =array+ [resultadosEst[\"results_adf\"][\"p-value\"], resultadosEst[\"lambda_adf\"]]\n dataframe_resultados.loc[(column, fecha), [\"hurst_rs\",\"hurst_dma\",\"hurst_dsod\",\"hurst_diffusion\", \"adf_test\", \"lambda\"]] = array\n print(\"Hurst: {}\".format(str(resultadosEst[\"hurst\"])))\n print(\"Lambda adf: {}\".format(resultadosEst[\"lambda_adf\"]))\n print(\"Test adf: {}\".format(resultadosEst[\"results_adf\"]))\n\n\n dataframe_resultados.to_csv(\"reports/energies/estacionaridad_univariante\" + str(dt.date.today())+\".csv\")\n","repo_name":"webclinic017/marketAnalyzer","sub_path":"index/main_energies_univariante.py","file_name":"main_energies_univariante.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31789948738","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}\ncsv_file = 'metacrict-top-games'\n\ndef get_games(games_url):\n response = requests.get(games_url, headers=headers)\n\n if (response.status_code == 200):\n soup = BeautifulSoup(response.content, 'html.parser')\n trs = soup.find_all('tr', attrs={'class': None})\n\n games = []\n for tr in trs:\n game_score = int(tr.find('div', attrs={'class': 'metascore_w'}).text)\n\n game_title = tr.find('h3').text\n game_url = tr.find('a', attrs={'class': 'title'})['href']\n response = requests.get('https://www.metacritic.com' + game_url, headers=headers)\n \n if (response.status_code == 200):\n soup = BeautifulSoup(response.content, 'html.parser')\n critics = soup.find('span', attrs={'class': 'based' }).findNext('span').text.strip()\n \n game = {\n 'game-title': game_title,\n 'game-score': game_score,\n 'number-of-critics': critics\n }\n\n games.append(game)\n\n return games\n\nwith open(\"../data/\" + csv_file + \".csv\", \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n\n games = get_games('https://www.metacritic.com/browse/games/score/metascore/all/all/filtered')\n\n writer.writerow(['Game Title', 'Game Score', 'Number of Critics'])\n\n for game in games:\n writer.writerow([game['game-title'], game['game-score'], game['number-of-critics']])","repo_name":"openvideogamedata/openvideogamedata.github.io","sub_path":"src/get-metacrict.py","file_name":"get-metacrict.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"11547281706","text":"from src.lib.DataStructure import DataStructure\n\n\nclass ParameterStorage(DataStructure):\n def __repr__(self):\n parameters = vars(self)\n\n output = ''\n for name in parameters.keys():\n output += (str(name) + ': ' + str(parameters[name]) + '\\n')\n return output\n","repo_name":"shayansm2/metaalgolib","sub_path":"src/problems/lib/ParameterStorage.py","file_name":"ParameterStorage.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
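Each `+` line above adds one JSON record to the dataset: the raw source file is stored as an escaped string under the `text` key, next to metadata fields that appear in the records themselves (`seq_id`, `repo_name`, `sub_path`, `file_name`, `program_lang`, `lang`, `doc_type`, `stars`, `dataset`, `pt`). A minimal sketch of a reader for this layout, assuming one JSON object per non-empty line; the path `records.jsonl` is a placeholder, not a filename taken from this diff:

```python
import json
from collections import Counter
from typing import Dict, Iterator


def iter_records(path: str = "records.jsonl") -> Iterator[Dict]:
    """Yield one decoded record (a dict) per non-empty JSONL line."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)


if __name__ == "__main__":
    # Tally records by programming language using the "program_lang" field.
    by_lang = Counter(rec.get("program_lang", "unknown") for rec in iter_records())
    print(by_lang.most_common())

    # Materialize one record back into an ordinary source file:
    # json.loads has already unescaped the \n, \r\n and \" sequences in "text".
    rec = next(iter_records())
    with open(rec["file_name"], "w", encoding="utf-8") as out:
        out.write(rec["text"])
```

The reader streams line by line rather than loading the whole file at once, since each record's `text` field runs to several kilobytes (see `file_size_in_byte` in the metadata).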
+{"seq_id":"73239020745","text":"from selenium.webdriver.common.by import By\nimport unittest\n\nclass First_selection:\n def __init__(self, myDriver):\n self.driver = myDriver\n self.result_num = (By.CLASS_NAME, 'srp-controls__count-heading')\n self.products_name = (By.CLASS_NAME, 's-item__title')\n self.products_price = (By.CLASS_NAME, 's-item__price')\n\n def get_amount(self):\n number = (self.driver.find_element(*self.result_num).text)\n print('The amount of shoes PUMA size 10 is ' , number)\n\n def test_order(self):\n list_prices = self.driver.find_elements(*self.products_price)\n list_prices_num = []\n for i in range(5):\n x = float(list_prices[i].text[3:])\n list_prices_num.append(x)\n\n tc = unittest.TestCase('__init__')\n tc.assertTrue (list_prices_num[0] <= list_prices_num[1] <= list_prices_num[2] <= list_prices_num[3] <= list_prices_num[4])\n\n if (list_prices_num[0] <= list_prices_num[1] <= list_prices_num[2] <= list_prices_num[3] <= list_prices_num[4]):\n print('\\n','The sort option Precio + Envío: más bajo primero is working as expected')\n\n def get_info_asc(self):\n list_names = self.driver.find_elements(*self.products_name)\n list_prices = self.driver.find_elements(*self.products_price)\n print('\\n', 'Products ordered by price, ascendant')\n for i in range(5):\n print(list_prices[i].text, list_names[i+1].text)\n","repo_name":"mvickm/QATestBL","sub_path":"Pages/First_selection.py","file_name":"First_selection.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24317284678","text":"import os, json, bot, auth, logging, traceback\n\nfrom bottle import ( \n run, post, route, get, response, request as bottle_request\n)\n\nlogging.basicConfig(level=logging.INFO, format='%(levelname)s:%(filename)s:%(funcName)s:%(asctime)s:%(message)s')\n\n@post('/') # Local server for testing (not actually deployed)\ndef main():\n try:\n logging.info(json.dumps(bottle_request.json, indent=4, sort_keys=True))\n return bot.handler({\"body\": bottle_request.json}, \"local server\")\n except Exception as err:\n logging.error(traceback.format_exc())\n return {\n \"error\": str(err)\n }\n\t\n@get('/auth')\ndef authentication():\n try:\n event = {\n \"queryStringParameters\": {\n \"code\": bottle_request.query.get(\"code\"),\n \"state\": bottle_request.query.get(\"state\"),\n },\n \"headers\": {\n \"Host\": bottle_request.urlparts.netloc\n },\n \"requestContext\": {\n \"path\": bottle_request.urlparts.path\n } \n }\n logging.info(json.dumps(event, indent=4, sort_keys=True))\n return auth.handler(event, \"local server\")\n except Exception as err:\n logging.error(traceback.format_exc())\n return {\n \"error\": str(err)\n }\n\nif __name__ == '__main__': \n run(host='localhost', port=5000, debug=True)\n","repo_name":"OnePunMan/anibot","sub_path":"local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"24660628380","text":"# Farokh Confectioner\n# Fabio Di Troia\n# Gathers all the Anrdoid XML Permission files into a single folder\n\nimport sys\nimport os\nimport shutil\n\n# args[1]: The directory containing all of the decompiled Android APK files\n# args[2]: The directory to copy all of the xml permissions to\ndef main(args):\n MINIMUM_ARGS = 3\n if len(args) != MINIMUM_ARGS:\n print(\"Please enter correct number of args\")\n exit(1)\n\n PERMISSION_DIR = args[1]\n COPY_DIR = 
args[2]\n copyPermissions(PERMISSION_DIR, COPY_DIR)\n\ndef copyPermissions(permission_dir, copy_dir):\n try:\n PERMISSION_FILE_NAME = \"AndroidManifest.xml\"\n for root, dirs, files in os.walk(permission_dir, topdown=False):\n for name in dirs:\n perm_file = os.path.join(root, name, PERMISSION_FILE_NAME)\n if(os.path.isfile(perm_file)):\n shutil.copyfile(perm_file, os.path.join(copy_dir, name + \"_\" + PERMISSION_FILE_NAME))\n except Exception as e:\n print(str(e))\n exit(1)\n print(\"Done\")\n\nif __name__ == \"__main__\":\n main(sys.argv)","repo_name":"FarokhC/Android-Malware-Detection-2","sub_path":"copyXMLPermissions.py","file_name":"copyXMLPermissions.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24110117017","text":"from typing import Any, Callable, List, Tuple\n\nimport numpy as np\nimport torch\nfrom scipy.sparse import csc_matrix, csr_matrix, lil_matrix\n\nfrom theseus.constants import DeviceType\n\n\ndef _mat_vec_cpu(\n batch_size: int,\n num_cols: int,\n A_row_ptr: torch.Tensor,\n A_col_ind: torch.Tensor,\n A_val: torch.Tensor,\n v: torch.Tensor,\n) -> torch.Tensor:\n assert batch_size == A_val.shape[0]\n num_rows = len(A_row_ptr) - 1\n retv_data = np.array(\n [\n csr_matrix((A_val[i].numpy(), A_col_ind, A_row_ptr), (num_rows, num_cols))\n * v[i]\n for i in range(batch_size)\n ],\n dtype=np.float64,\n )\n return torch.tensor(retv_data, dtype=torch.float64)\n\n\ndef mat_vec(\n batch_size: int,\n num_cols: int,\n A_row_ptr: torch.Tensor,\n A_col_ind: torch.Tensor,\n A_val: torch.Tensor,\n v: torch.Tensor,\n) -> torch.Tensor:\n if A_row_ptr.device.type == \"cuda\":\n try:\n from theseus.extlib.mat_mult import mat_vec as mat_vec_cuda\n except Exception as e:\n raise RuntimeError(\n \"Theseus C++/Cuda extension cannot be loaded\\n\"\n \"even if Cuda appears to be available. Make sure Theseus\\n\"\n \"is installed with Cuda support (export CUDA_HOME=...)\\n\"\n f\"{type(e).__name__}: {e}\"\n )\n return mat_vec_cuda(batch_size, num_cols, A_row_ptr, A_col_ind, A_val, v)\n else:\n return _mat_vec_cpu(batch_size, num_cols, A_row_ptr, A_col_ind, A_val, v)\n\n\ndef _tmat_vec_cpu(\n batch_size: int,\n num_cols: int,\n A_row_ptr: torch.Tensor,\n A_col_ind: torch.Tensor,\n A_val: torch.Tensor,\n v: torch.Tensor,\n) -> torch.Tensor:\n assert batch_size == A_val.shape[0]\n num_rows = len(A_row_ptr) - 1\n retv_data = np.array(\n [\n csc_matrix((A_val[i].numpy(), A_col_ind, A_row_ptr), (num_cols, num_rows))\n * v[i]\n for i in range(batch_size)\n ],\n dtype=np.float64,\n )\n return torch.tensor(retv_data, dtype=torch.float64)\n\n\ndef tmat_vec(\n batch_size: int,\n num_cols: int,\n A_row_ptr: torch.Tensor,\n A_col_ind: torch.Tensor,\n A_val: torch.Tensor,\n v: torch.Tensor,\n):\n if A_row_ptr.device.type == \"cuda\":\n try:\n from theseus.extlib.mat_mult import tmat_vec as tmat_vec_cuda\n except Exception as e:\n raise RuntimeError(\n \"Theseus C++/Cuda extension cannot be loaded\\n\"\n \"even if Cuda appears to be available. 
Make sure Theseus\\n\"\n \"is installed with Cuda support (export CUDA_HOME=...)\\n\"\n f\"{type(e).__name__}: {e}\"\n )\n return tmat_vec_cuda(batch_size, num_cols, A_row_ptr, A_col_ind, A_val, v)\n else:\n return _tmat_vec_cpu(batch_size, num_cols, A_row_ptr, A_col_ind, A_val, v)\n\n\ndef _sparse_mat_vec_fwd_backend(\n ctx: Any,\n num_cols: int,\n A_row_ptr: torch.Tensor,\n A_col_ind: torch.Tensor,\n A_val: torch.Tensor,\n v: torch.Tensor,\n op: Callable[\n [int, int, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],\n torch.Tensor,\n ],\n) -> torch.Tensor:\n assert A_row_ptr.ndim == 1\n assert A_col_ind.ndim == 1\n assert A_val.ndim == 2\n assert v.ndim == 2\n ctx.save_for_backward(A_val, A_row_ptr, A_col_ind, v)\n ctx.num_cols = num_cols\n return op(A_val.shape[0], num_cols, A_row_ptr, A_col_ind, A_val, v)\n\n\ndef _sparse_mat_vec_bwd_backend(\n ctx: Any, grad_output: torch.Tensor, is_tmat: bool\n) -> Tuple[torch.Tensor, torch.Tensor]:\n A_val, A_row_ptr, A_col_ind, v = ctx.saved_tensors\n num_rows = len(A_row_ptr) - 1\n A_grad = torch.zeros_like(A_val) # (batch_size, nnz)\n v_grad = torch.zeros_like(v) # (batch_size, num_cols)\n for row in range(num_rows):\n start = A_row_ptr[row]\n end = A_row_ptr[row + 1]\n columns = A_col_ind[start:end].long()\n if is_tmat:\n A_grad[:, start:end] = v[:, row].view(-1, 1) * grad_output[:, columns]\n v_grad[:, row] = (grad_output[:, columns] * A_val[:, start:end]).sum(dim=1)\n else:\n A_grad[:, start:end] = v[:, columns] * grad_output[:, row].view(-1, 1)\n v_grad[:, columns] += grad_output[:, row].view(-1, 1) * A_val[:, start:end]\n return A_grad, v_grad\n\n\nclass _SparseMvPAutograd(torch.autograd.Function):\n @staticmethod\n def forward( # type: ignore\n ctx: Any,\n num_cols: int,\n A_row_ptr: torch.Tensor,\n A_col_ind: torch.Tensor,\n A_val: torch.Tensor,\n v: torch.Tensor,\n ) -> torch.Tensor:\n return _sparse_mat_vec_fwd_backend(\n ctx, num_cols, A_row_ptr, A_col_ind, A_val, v, mat_vec\n )\n\n @staticmethod\n @torch.autograd.function.once_differentiable\n def backward( # type: ignore\n ctx: Any, grad_output: torch.Tensor\n ) -> Tuple[None, None, None, torch.Tensor, torch.Tensor]:\n A_grad, v_grad = _sparse_mat_vec_bwd_backend(ctx, grad_output, False)\n return None, None, None, A_grad, v_grad\n\n\nclass _SparseMtvPAutograd(torch.autograd.Function):\n @staticmethod\n def forward( # type: ignore\n ctx: Any,\n num_cols: int,\n A_row_ptr: torch.Tensor,\n A_col_ind: torch.Tensor,\n A_val: torch.Tensor,\n v: torch.Tensor,\n ) -> torch.Tensor:\n return _sparse_mat_vec_fwd_backend(\n ctx, num_cols, A_row_ptr, A_col_ind, A_val, v, tmat_vec\n )\n\n @staticmethod\n @torch.autograd.function.once_differentiable\n def backward( # type: ignore\n ctx: Any, grad_output: torch.Tensor\n ) -> Tuple[None, None, None, torch.Tensor, torch.Tensor]:\n A_grad, v_grad = _sparse_mat_vec_bwd_backend(ctx, grad_output, True)\n return None, None, None, A_grad, v_grad\n\n\nsparse_mv = _SparseMvPAutograd.apply\nsparse_mtv = _SparseMtvPAutograd.apply\n\n\ndef random_sparse_binary_matrix(\n num_rows: int,\n num_cols: int,\n fill: float,\n min_entries_per_col: int,\n rng: torch.Generator,\n) -> csr_matrix:\n retv = lil_matrix((num_rows, num_cols))\n\n if num_rows > 1 and min_entries_per_col > 0:\n min_entries_per_col = min(num_rows, min_entries_per_col)\n rows_array = torch.arange(num_rows, device=rng.device)\n rows_array_f = rows_array.to(dtype=torch.float)\n for c in range(num_cols):\n row_selection = rows_array[\n rows_array_f.multinomial(min_entries_per_col, 
generator=rng)\n ].cpu()\n for r in row_selection:\n retv[r, c] = 1.0\n\n # make sure last row is non-empty, so: len(indptr) = rows+1\n retv[\n num_rows - 1, int(torch.randint(num_cols, (), device=rng.device, generator=rng))\n ] = 1.0\n\n num_entries = int(fill * num_rows * num_cols)\n while retv.getnnz() < num_entries:\n col = int(torch.randint(num_cols, (), device=rng.device, generator=rng))\n row = int(torch.randint(num_rows, (), device=rng.device, generator=rng))\n retv[row, col] = 1.0\n\n return retv.tocsr()\n\n\ndef random_sparse_matrix(\n batch_size: int,\n num_rows: int,\n num_cols: int,\n fill: float,\n min_entries_per_col: int,\n rng: torch.Generator,\n device: DeviceType,\n int_dtype: torch.dtype = torch.int64,\n float_dtype: torch.dtype = torch.double,\n) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n A_skel = random_sparse_binary_matrix(\n num_rows, num_cols, fill, min_entries_per_col=min_entries_per_col, rng=rng\n )\n A_row_ptr = torch.tensor(A_skel.indptr, dtype=int_dtype).to(device)\n A_col_ind = torch.tensor(A_skel.indices, dtype=int_dtype).to(device)\n A_val = torch.rand(\n batch_size,\n A_col_ind.size(0),\n device=rng.device,\n dtype=float_dtype,\n generator=rng,\n ).to(device)\n return A_col_ind, A_row_ptr, A_val, A_skel\n\n\ndef split_into_param_sizes(\n n: int, param_size_range_min: int, param_size_range_max: int, rng: torch.Generator\n) -> List[int]:\n paramSizes = []\n tot = 0\n while tot < n:\n newParam = min(\n int(\n torch.randint(\n param_size_range_min,\n param_size_range_max,\n (),\n device=rng.device,\n generator=rng,\n )\n ),\n n - tot,\n )\n tot += newParam\n paramSizes.append(newParam)\n return paramSizes\n","repo_name":"facebookresearch/theseus","sub_path":"theseus/utils/sparse_matrix_utils.py","file_name":"sparse_matrix_utils.py","file_ext":"py","file_size_in_byte":8470,"program_lang":"python","lang":"en","doc_type":"code","stars":1481,"dataset":"github-code","pt":"81"} +{"seq_id":"25866218951","text":"import os\n\nfrom pyserini.dsearch import SimpleDenseSearcher, TctColBertQueryEncoder\nfrom utils import load_tsv_file, save_file\n\nTEXT_DATA_FILE = \"./msmarco-test2019-queries.tsv\"\nRESULT_FILE = \"./msmarco-test2019-results.tsv\"\n\ndef main():\n test_data = load_tsv_file(TEXT_DATA_FILE)\n \n\n encoder = TctColBertQueryEncoder('castorini/tct_colbert-msmarco')\n searcher = SimpleDenseSearcher.from_prebuilt_index(\n 'msmarco-passage-tct_colbert-hnsw',\n encoder\n )\n result_data = []\n for data in test_data:\n hits = searcher.search(data[1])\n\n for i in range(0, len(hits)):\n result_string = f\"{data[0]} Q0 D{hits[i].docid:7} {i+1} {hits[i].score:.5f} IndriQueryLikelihood\"\n result_data.append(result_string)\n print(f'{i+1:2} {hits[i].docid:7} {hits[i].score:.5f}')\n\n save_file(result_data, RESULT_FILE)\n\n print(\"______End________\")\n \n\nif __name__ == \"__main__\":\n main()","repo_name":"Zaker237/doc-retrieval-pyserini","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12624699510","text":"from flask import Flask, render_template, request\nimport requests\ntry:\n from config import API_KEY\nexcept ImportError:\n print(\"You need to execute setup.py first. 
Press enter to exit...\")\n input()\n quit()\n \n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/weather', methods=['POST'])\ndef get_weather():\n city = request.form['city']\n url = f'http://api.weatherapi.com/v1/current.json?key={API_KEY}&q={city}'\n\n response = requests.get(url)\n weather_data = response.json()\n\n try:\n current_data = weather_data['current']\n temperature_c = round(current_data['temp_c'])\n feelslike_c = round(current_data['feelslike_c'])\n temperature_f = round(current_data['temp_f'])\n feelslike_f = round(current_data['feelslike_f'])\n description = current_data['condition']['text']\n descriptionicon = current_data['condition']['icon']\n wind_dir = current_data['wind_dir']\n wind_speed = current_data['wind_kph']\n humidity = current_data['humidity']\n precip = current_data.get('precip_mm', 'N/A')\n isday = current_data['is_day']\n\n return render_template('weather.html', city=city, temperature_c=temperature_c, feelslike_c=feelslike_c,\n temperature_f=temperature_f, feelslike_f=feelslike_f, description=description, wind_dir=wind_dir, \n wind_speed=wind_speed, humidity=humidity,\n precip=precip, isday=isday, descriptionicon=descriptionicon)\n\n except KeyError as e:\n print(f\"Error: {e}\")\n return render_template('weather.html', error=\"Error retrieving weather data. Please try again.\")\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"Alecaialex/weather-web","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8308562057","text":"import logging\nimport os\nimport sys\n\nif __name__ == '__main__':\n sys.path.append(os.environ.get('XLRINFRADIR', ''))\n\nfrom py_common.jenkins_aggregators import JenkinsAggregatorBase\nfrom py_common.mongo import MongoDB\n\nAGGREGATOR_PLUGINS = [{'class_name': 'FuncTestLogParser',\n 'job_names': ['__ALL__']}]\n\n\nclass FuncTestLogParserException(Exception):\n pass\n\n\n# N.B. This parser relies on logging modifications that were added Sep. 
2020\nclass FuncTestLogParser(JenkinsAggregatorBase):\n    def __init__(self, *, job_name):\n        \"\"\"\n        Class-specific initialization.\n        \"\"\"\n        super().__init__(job_name=job_name,\n                         agg_name=self.__class__.__name__,\n                         send_log_to_update=True)\n        self.logger = logging.getLogger(__name__)\n\n\n    def _get_timestamp_ms(self, *, fields):\n        try:\n            return int(self.start_time_ms+(float(fields[0])*1000))\n        except ValueError:\n            # log the fields that failed to parse ("line" is not in scope here)\n            self.logger.exception(\"timestamp parse error: {}\".format(fields))\n            return None\n\n    def _do_update_build(self, *, jbi, log, is_reparse=False, test_mode=False):\n        \"\"\"\n        Parse the log for sub-test info.\n        \"\"\"\n        self.start_time_ms = jbi.start_time_ms()\n        self.duration_ms = jbi.duration_ms()\n\n        subtest_data = {}\n        cur_subtest = None\n\n        for lnum, line in enumerate(log.splitlines()):\n\n            fields = line.split()\n            if len(fields) < 3:\n                continue\n\n            if cur_subtest is not None:\n                if fields[1] == 'Error:':\n                    cur_subtest['result'] = \"Error\"\n                    cur_subtest['reason'] = \" \".join(fields[3:])\n                    continue\n                elif fields[1] == \"SUBTEST_RESULT:\" or fields[1] == \"TESTCASE_RESULT:\":\n                    cur_subtest['result'] = \" \".join(fields[3:])\n                    continue\n                else:\n                    # If field[1] is our subtest name assume fields[3:] is result\n                    name = fields[1][1:-1]\n                    if name == cur_subtest['name']:\n                        cur_subtest['result'] = \" \".join(fields[3:]) # XXXrs\n                        continue\n\n            if fields[1] == \"SUBTEST_START:\" or fields[1] == \"TESTCASE_START:\":\n                if cur_subtest is not None:\n                    raise FuncTestLogParserException(\n                        \"nested TEST_START\\n{}: {}\".format(lnum, line))\n\n                test_name = fields[2]\n                test_id = MongoDB.encode_key(test_name)\n                cur_subtest = {'name': test_name,\n                               'id': test_id,\n                               'start_time_ms': self._get_timestamp_ms(fields=fields)}\n                continue\n\n            if fields[1] == \"SUBTEST_END:\" or fields[1] == \"TESTCASE_END:\":\n                if cur_subtest is None:\n                    raise FuncTestLogParserException(\n                        \"TEST_END before TEST_START\\n{}: {}\"\n                        .format(lnum, line))\n\n                if fields[2] != cur_subtest['name']:\n                    raise FuncTestLogParserException(\n                        \"unmatched TEST_END for {} while cur_subtest {}\\n{}: {}\"\n                        .format(fields[2], cur_subtest, lnum, line))\n\n                ts_ms = self._get_timestamp_ms(fields=fields)\n                duration_ms = ts_ms - cur_subtest['start_time_ms']\n                cur_subtest['duration_ms'] = duration_ms\n                test_id = cur_subtest.pop('id')\n                if test_id not in subtest_data:\n                    subtest_data[test_id] = {}\n                iteration = len(subtest_data[test_id].keys())+1\n                subtest_data[test_id][str(iteration)] = cur_subtest\n                cur_subtest = None\n                continue\n\n            if cur_subtest is None:\n                continue\n\n            if fields[1] == \"NumTests:\":\n                try:\n                    cnt = int(fields[2])\n                except ValueError:\n                    raise FuncTestLogParserException(\n                        \"non-integer NumTests value\\n{}: {}\".format(lnum, line))\n                if cnt > 1:\n                    raise FuncTestLogParserException(\n                        \"unexpected NumTests value\\n{}: {}\".format(lnum, line))\n                if cnt == 0:\n                    cur_subtest['result'] = \"Skip\" # XXXrs ?!?\n\n        return {'functest_subtests': subtest_data}\n\n\n    def update_build(self, *, jbi, log, is_reparse=False, test_mode=False):\n        try:\n            return self._do_update_build(jbi=jbi, log=log,\n                                         is_reparse=is_reparse,\n                                         test_mode=test_mode)\n        except Exception:\n            self.logger.error(\"LOG PARSE ERROR\", exc_info=True)\n\n\n# In-line \"unit test\"\nif __name__ == '__main__':\n    import argparse\n    from pprint import pprint, pformat\n    from py_common.jenkins_api import JenkinsApi, JenkinsBuildInfo\n\n    # It's log, it's log...
:)\n logging.basicConfig(level=logging.INFO,\n format=\"'%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s\",\n handlers=[logging.StreamHandler(sys.stdout)])\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--job\", help=\"jenkins job name\", default=\"FuncTestTrigger\")\n parser.add_argument(\"--bnum\", help=\"jenkins build number\", default=\"15352\")\n parser.add_argument(\"--log\", help=\"just print out the log\", action=\"store_true\")\n args = parser.parse_args()\n\n test_builds = []\n builds = args.bnum.split(':')\n if len(builds) == 1:\n test_builds.append((args.job, args.bnum))\n else:\n for bnum in range(int(builds[0]), int(builds[1])+1):\n test_builds.append((args.job, bnum))\n\n japi = JenkinsApi(host='jenkins.int.xcalar.com')\n\n for job_name,build_number in test_builds:\n parser = FuncTestLogParser(job_name=job_name)\n jbi = JenkinsBuildInfo(job_name=job_name, build_number=build_number, japi=japi)\n log = jbi.console()\n result = jbi.result()\n if args.log:\n print(log)\n else:\n print(\"checking job: {} build: {} result: {}\".format(job_name, build_number, result))\n data = parser.update_build(jbi=jbi, log=jbi.console())\n pprint(data)\n","repo_name":"varlogtim/xcalar-infra","sub_path":"py_common/jenkins_aggregators/update/plugins/functest_log_parser.py","file_name":"functest_log_parser.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32784799599","text":"'''\r\nCreated on Mar 30, 2016\r\n\r\n@author: Akash.Gupta17\r\n'''\r\nclass Card:\r\n def arrange(self,s,one,two,three):\r\n for k in range(0,15,3):\r\n one.append(s[k])\r\n for l in range(1,15,3):\r\n two.append(s[l])\r\n for m in range(2,15,3):\r\n three.append(s[m])\r\n return one,two,three\r\n \r\n def insert(self,one,new_one):\r\n for temp in range(5):\r\n new_one.append(one[temp])\r\n return new_one","repo_name":"akashg105/NumberGame","sub_path":"arrangeCherry.py","file_name":"arrangeCherry.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27542444034","text":"def ExposePattern(intList):\n stringList = []\n for i in range(1, len(intList)):\n stringList.append(str(intList[i] - intList[i-1]))\n return stringList\n\n# print(ExposePattern([1, 3, 2, 2, 4, 3, 3, 5, 4]))\n\ndef ContainsPattern(list1, list2):\n match = True\n for i in range(len(list2)):\n if list1[i] != list2[i]:\n match = False\n break\n return match\n\n# -1 -1 +2 -1 -1 +2 -1 -1\n# Pattern : -1 -1 +2\n\n\ndef CheckPattern(length, list):\n pattern = list[0:length]\n for i in range(0, len(list), length):\n if len(list) - i < length:\n split = list[i:len(list)]\n if not ContainsPattern(pattern, split):\n return False\n else:\n split = list[i:i+length]\n if not pattern == split:\n return False\n return True\n\n\nwhile True:\n num = []\n for i in input().strip().split(\" \"):\n num.append(int(i))\n if num[0] == 0:\n break\n num.pop(0)\n\n # [2, 3, 2, 3, 2, 3,2, 3,3,5,4]\n # +1 -1 +1 -1 ....\n pattern = ExposePattern(num)\n shortestPattern = len(pattern)\n for i in range(len(pattern), 0, -1):\n if CheckPattern(i, pattern):\n if i < shortestPattern:\n shortestPattern = i\n 
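# the shortest repeating period found in the difference sequence is the answer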
print(shortestPattern)\n\n\n\n","repo_name":"KennyCheung-Dev/PythonClassContent","sub_path":"PythonAlgo/CCC21/CCC2010J4_GlobalWarming.py","file_name":"CCC2010J4_GlobalWarming.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30409672082","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/9/20\n\n@author: gaoan\n\"\"\"\nimport pytz\n\neastern = pytz.timezone('US/Eastern')\nchina = pytz.timezone('Asia/Shanghai')\nhongkong = pytz.timezone('Asia/Hong_Kong')\n\n\ndef has_value(m, key):\n if not m:\n return False\n if not (key in m):\n return False\n if not m[key]:\n return False\n return True\n","repo_name":"algo21-115010302/TouchFishCapital","sub_path":"TouchFishCapital/venv/Lib/site-packages/tigeropen/common/util/common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"13249718143","text":"from tkinter import * \nfrom tkinter import ttk\nfrom tkcalendar import Calendar, DateEntry\nfrom tkinter import messagebox\nimport matplotlib.pyplot as plt\nfrom PIL import Image, ImageTk\nimport PIL.Image\nimport sqlite3\nfrom Main_Window.Currency import *\nfrom Main_Window.theme import ttk_theme\n\n\ndef callbalance(root):\n labelframe1 = ttk.LabelFrame(root, text=\"Balance Section\") \n labelframe1.grid(row=1,column = 0,columnspan=2, sticky='WE', \\\n padx=20, pady=20, ipadx=30, ipady=30) \n \n db = sqlite3.connect('myspendmate.db')\n cursor = db.cursor()\n total_income = 0\n total_expense = 0\n\n cursor.execute(\"select sum(amount) from income\")\n total_income = cursor.fetchone()[0]\n if(str(total_income) == 'None'):\n total_income = 0\n print('total_income :' + str(total_income))\n\n cursor.execute(\"select sum(amount) from expense\")\n total_expense = cursor.fetchone()[0]\n if(str(total_expense)=='None'):\n total_expense=0\n\n print('total_expense :' + str(total_expense))\n\n\n balance = total_income - total_expense\n CurrencyCurrent = CurrentCurrr()\n print('balance :' + str(balance) + str(CurrencyCurrent))\n cursor.close()\n db.commit()\n db.close()\n\n rootlabel = ttk.Label(labelframe1, text=\"Your Current Balance is : \")\n # rootlabel.config(font=(\"Courier\", 16)) \n rootlabel.grid(row = 1, column = 0, pady = 10) \n print(\"current\" + CurrencyCurrent)\n rootlabel1 = ttk.Label(labelframe1, text=str(balance) + str(CurrencyCurrent))\n rootlabel1.config(font=(\"Courier\", 13)) \n rootlabel1.grid(row = 1, column = 1, pady = 10) ","repo_name":"tushargithub44/SpendMate","sub_path":"Main_Window/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"6433314808","text":"import logging\nimport random\nimport time\nfrom pathlib import Path\nfrom typing import List\n\nfrom PIL import Image\nfrom RPi import GPIO\n\nfrom pi_ink.apps.iapp import IApp\nfrom pi_ink.displays import EDisplayResponse, InkyImpressionDisplay\nfrom pi_ink.renderers import ImageRenderer\n\nlogger = logging.getLogger(__name__)\n\n\nclass PictureFrame(IApp):\n _btns: List[int] = [\n 5,\n 6,\n 16,\n 24,\n ] # gpio pins for each button (from top btn to bottom btn)\n _btn_names: List[str] = [\"A\", \"B\", \"C\", \"D\"] # names for each button\n _pic_dir: Path\n _all_pic_fps: List[Path]\n _cur_pic_fp: Path = None\n _cur_pic_img: Path = None\n _history: 
List[Path] = []\n _history_cursor: int = 0\n _history_limit: int = 1000\n _t0: float = 0.0\n _t1: float = 0.0\n _do_update: bool = True\n _timer_paused: bool = False\n _display_busy: bool = False\n\n def btn_busy_ignore(fn):\n \"\"\"\n Decorator that ignores button presses if the display is busy.\n \"\"\"\n\n def wrapper(self, *args, **kwargs):\n if self._display_busy:\n logger.info(\"display busy, ignoring button press\")\n return\n fn(self, *args, **kwargs)\n\n return wrapper\n\n @btn_busy_ignore\n def __handle_btn_a(self):\n logger.info(\"btn a -> requesting next picture\")\n self._cur_pic_fp, self._cur_pic_img = self.__next_picture()\n self._do_update = True\n self.__reset_timer()\n\n @btn_busy_ignore\n def __handle_btn_b(self):\n logger.info(\"btn b -> requesting prev picture\")\n self._cur_pic_fp, self._cur_pic_img = self.__prev_picture()\n self._do_update = True\n self.__reset_timer()\n\n @btn_busy_ignore\n def __handle_btn_c(self):\n logger.info(\"btn c -> clearing history\")\n self._history = [self._cur_pic_fp]\n self._history_cursor = 0\n\n @btn_busy_ignore\n def __handle_btn_d(self):\n logger.info(\"btn d -> toggle timer\")\n self._timer_paused = not self._timer_paused\n if self._timer_paused:\n logger.info(\"timer paused\")\n else:\n logger.info(\"timer unpaused\")\n self.__reset_timer()\n\n _btn_fns = {\n \"A\": __handle_btn_a,\n \"B\": __handle_btn_b,\n \"C\": __handle_btn_c,\n \"D\": __handle_btn_d,\n }\n\n def __btn_callback(self, pin):\n btn_name = self._btn_names[self._btns.index(pin)]\n self._btn_fns[btn_name](self)\n\n def __init__(self, **kwargs):\n GPIO.setmode(GPIO.BCM) # setups RPI.GPIO to use the BCM pin numbering scheme\n\n # btns connect to ground, set them as inputs with pull-up resistors\n GPIO.setup(self._btns, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n # setup callbacks for each button\n for btn, btn_name in zip(self._btns, self._btn_names):\n GPIO.add_event_detect(\n btn, GPIO.FALLING, callback=self.__btn_callback, bouncetime=250\n )\n logger.info(f\"added callback for button {btn_name} on pin {btn}\")\n\n # create path relative to this file and one level up\n self._pic_dir = Path(__file__).parent.parent / \"photos\"\n self._all_pic_fps = [\n fp\n for fp in self._pic_dir.iterdir()\n if fp.is_file() and fp.suffix in [\".JPG\", \".jpg\", \".png\"]\n ]\n\n def __next_picture(self) -> (Path, Image):\n # check if cursor is at the end of the history\n if len(self._history) == 0 or self._history_cursor == len(self._history) - 1:\n logger.info(\"getting random picture\")\n # get a random picture\n fp, img = self.__get_random_picture()\n self._history.append(fp)\n\n # if this is the first picture in the history, set the cursor to 0\n # otherwise, increment the cursor\n #\n # only need to do this check for the next picture, and not the previous picture, method\n if len(self._history) == 1:\n self._history_cursor = 0\n else:\n self._history_cursor += 1\n\n # if the history is longer than history limit, remove the oldest picture\n if len(self._history) > self._history_limit:\n self._history.pop(0)\n\n return fp, img\n\n # otherwise, get the next picture in the history\n logger.info(\n \"getting next picture from history\",\n extra={\n \"history_cursor\": self._history_cursor,\n \"history_cursor_next\": self._history_cursor + 1,\n \"current_cursor_fname\": self._history[self._history_cursor].name,\n \"next_cursor_fname\": self._history[self._history_cursor + 1].name,\n },\n )\n self._history_cursor += 1\n fp = self._history[self._history_cursor]\n img = Image.open(fp)\n 
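# only file paths are kept in the history, so the image itself is re-read from disk here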
return fp, img\n\n    def __prev_picture(self) -> (Path, Image):\n        # check if cursor is at the beginning of the history\n        if len(self._history) == 0 or self._history_cursor == 0:\n            logger.info(\"getting random picture\")\n\n            # if the history is longer than history limit, remove the oldest picture\n            #\n            # important [for prev only] that this is done before to ensure that we don't remove the previous picture we\n            # are adding at the beginning of the history\n            if (len(self._history) + 1) > self._history_limit:\n                self._history.pop(0)\n\n            # get a random picture\n            fp, img = self.__get_random_picture()\n            self._history.insert(0, fp)\n            return fp, img\n\n        # otherwise, get the previous picture in the history\n        logger.info(\n            \"getting previous picture from history\",\n            extra={\n                \"history_cursor\": self._history_cursor,\n                \"history_cursor_prev\": self._history_cursor - 1,\n                \"current_cursor_fname\": self._history[self._history_cursor].name,\n                \"prev_cursor_fname\": self._history[self._history_cursor - 1].name,\n            },\n        )\n        self._history_cursor -= 1\n        fp = self._history[self._history_cursor]\n        img = Image.open(fp)\n        return fp, img\n\n    def __get_random_picture(self) -> (Path, Image):\n        fp = random.choice(self._all_pic_fps)\n\n        if self._cur_pic_fp is not None:\n            while (\n                fp == self._cur_pic_fp\n            ):  # make sure we don't get the same picture twice in a row\n                fp = random.choice(self._all_pic_fps)\n\n        # attempt to get a picture that is not in the history only if the history is smaller than\n        # the total number of pictures\n        if 0 < len(self._history) < len(self._all_pic_fps):\n            logger.info(\n                \"attempting to get a picture that is not in the history\"\n            )\n            new_attempt = 0\n            shrinking_all_pic_fps = self._all_pic_fps\n            while (\n                fp in self._history\n                and new_attempt < 10\n                and len(shrinking_all_pic_fps) > 0\n            ):\n                # draw from the shrinking pool, then drop the drawn path so it cannot\n                # be drawn again (the loop condition guarantees a non-empty pool)\n                fp = random.choice(shrinking_all_pic_fps)\n                shrinking_all_pic_fps = [\n                    other_fp for other_fp in shrinking_all_pic_fps if other_fp != fp\n                ]\n                new_attempt += 1\n            logger.info(\n                f\"attempted {new_attempt} times to get a picture that is not in the history\"\n            )\n\n        img = Image.open(fp)\n        return fp, img\n\n    def __reset_timer(self):\n        self._t0 = time.time()\n        self._t1 = self._t0\n\n    def run(self, **kwargs):\n        img_renderer = ImageRenderer()\n        display = InkyImpressionDisplay()\n        change_picture_interval = 60 * 3  # in seconds\n\n        while True:\n            if not self._timer_paused:\n                self._t1 = time.time()\n\n            if self._t1 - self._t0 >= change_picture_interval:\n                self._do_update = True\n\n                # not necessary but will ensure we don't get stuck in a loop continually trying to update the display\n\n                logger.info(\"changing picture\")\n                self._cur_pic_fp, self._cur_pic_img = self.__next_picture()\n                self.__reset_timer()\n\n            if not self._do_update:\n                continue\n\n            logger.info(\n                f\"displaying picture {self._cur_pic_fp.name}\",\n                extra={\n                    \"history_cursor\": self._history_cursor,\n                    \"history_size\": len(self._history),\n                    \"timer_paused?\": self._timer_paused,\n                },\n            )\n\n            self._display_busy = True\n            frame = img_renderer.render_picture_frame(self._cur_pic_img)\n            sat = kwargs.get(\"saturation\", 0.5)\n            dynamic_saturation = kwargs.get(\"dynamic_saturation\", False)\n            logger.info(\n                f\"displaying frame [saturation={sat}, dynamic_saturation={dynamic_saturation}]\"\n            )\n            display.set_frame(\n                frame, saturation=sat, dynamic_saturation=dynamic_saturation\n            )\n            res = display.display_frame()\n\n            if res.response == EDisplayResponse.ERROR:\n                logger.error(f\"error displaying frame: {res.value}\")\n                break\n\n            if res.response ==
EDisplayResponse.NOT_READY:\n logger.info(f\"display not ready, waiting {res.value}s\")\n time.sleep(res.value)\n continue\n\n self._do_update = False\n self._display_busy = False\n self.__reset_timer() # reset timer\n","repo_name":"HOWZ1T/pi-ink","sub_path":"pi_ink/apps/pictureframe.py","file_name":"pictureframe.py","file_ext":"py","file_size_in_byte":9863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11056733302","text":"from ..imports import *\n\n\ndef lnprob(p,y,gp):\n # Trivial uniform prior.\n if np.any((-10 > p[1:]) + (p[1:] > 10)):\n return -np.inf\n\n # Update the kernel and compute the lnlikelihood.\n gp.set_parameter_vector(p)\n return gp.lnlikelihood(y, quiet=True)\n\ndef nllGP(p,y,t,e,gp):\n # if p[-1] < -1:\n # return 1e25\n # print(p)\n gp.set_parameter_vector(p)\n # try:\n # gp.compute(t, yerr=e)\n # except:\n # return 1e25\n # c = -gp.log_likelihood(y)\n # return -gp.log_likelihood(y)\n ll = gp.log_likelihood(y, quiet=True)\n return -ll if np.isfinite(ll) else 1e25\n\ndef grad_nllGP(p,y,t,e,gp):\n gp.set_parameter_vector(p)\n # try:\n # gp.compute(t, yerr=e)\n # except:\n # return np.array([0.0]*len(p))\n # c= -gp.grad_log_likelihood(y)\n return -gp.grad_log_likelihood(y,quiet=True)\n\ndef gp(x,\n y,\n yerr,\n x_original,\n rotation_period=None,\n rotation_amp=None,\n sqexp_metric=0.5,\n amp_metric=None,\n plot=False,\n figsize=(12,4),\n verbose=False\n ):\n\n x_pred = np.linspace(np.min(x), np.max(x), 1000)\n\n # try:\n # jitter = george.kernels.ConstantKernel(log_constant=np.log(np.nanmedian(yerr)))\n if amp_metric is None:\n amp = george.kernels.ConstantKernel(log_constant=np.log(np.std(y)))\n else:\n amp = george.kernels.ConstantKernel(log_constant=np.log(amp_metric))\n sqexp = george.kernels.ExpSquaredKernel(sqexp_metric, metric_bounds={'log_M_0_0': (np.log(0.01), np.log(1000))})\n\n # If the user has passed a rotation period to the GP then use quasi-periodic kernel, otherwise use a squared\n # exponential kernel.\n if rotation_period is not None:\n if rotation_amp is None:\n message = f\"\"\" A rotation period ({rotation_period}d), but no amplitude, has been passed to the GP, \n therefore we will use 0.1 as the starting value\"\"\"\n cheerfully_suggest(message)\n rotation_amp = 0.1\n sinexp = george.kernels.ExpSine2Kernel(gamma=rotation_amp, log_period=np.log(rotation_period))\n gpkernel = amp * (sqexp*sinexp)\n kernel = \"quasi-periodic\"\n if verbose:\n print(f\"Fitting rotation with quasi-periodic GP, period = {rotation_period}d...\")\n else:\n gpkernel = amp * sqexp\n kernel = \"square_exponential\"\n if verbose:\n print(f\"Fitting rotation with square-exponential GP...\")\n\n gaussproc = george.GP(gpkernel, white_noise=np.log(np.nanmedian(yerr)), fit_white_noise=True)\n gaussproc.compute(x, yerr)\n\n if verbose:\n print(\"Initial Parameter Vector: \", gaussproc.get_parameter_vector())\n if kernel == \"quasi-periodic\":\n p = gaussproc.get_parameter_vector()\n if verbose:\n print('Initial Params: AMP = ', str(math.exp(p[1])), ', SQEXP= ', str(math.exp(p[2])),\n ', GAMMA= ', str(p[3]), \", Period = \", str(math.exp(p[4])), \" days\",', JITTER = ',\n str(math.exp(p[0])))\n else:\n p = gaussproc.get_parameter_vector()\n if verbose:\n print('Initial Params: AMP = ', str(math.exp(p[1])), ', SQEXP= ', str(math.exp(p[2])),\n ', JITTER = ', str(math.exp(p[0])))\n\n if verbose:\n print(\"Initial ln-likelihood: {0:.2f}\".format(gaussproc.log_likelihood(y)))\n\n soln = minimize(nllGP, 
gaussproc.get_parameter_vector(), bounds=gaussproc.get_parameter_bounds(),\n                    jac=grad_nllGP, method=\"L-BFGS-B\", args=(y, x, yerr, gaussproc))\n    p1 = soln.x\n\n    if verbose:\n        print('Fitted GP HPs:', p1)\n    if kernel == \"quasi-periodic\":\n        if verbose:\n            print('Fitted Params: AMP = ', str(math.exp(p1[1])), ', SQEXP= ', str(math.exp(p1[2])), ', GAMMA= ',\n                  str(p1[3]), \", Period = \",\n                  str(math.exp(p1[4])), \" days\", ', JITTER = ', str(math.exp(p1[0])))\n        hp = {'amp': math.exp(p1[1]), 'sqexp': math.exp(p1[2]), 'gamma': p1[3], 'period': math.exp(p1[4]),\n              'jitter': math.exp(p1[0])}\n    else:\n        if verbose:\n            print('Fitted Params: AMP = ', str(math.exp(p1[1])), ', SQEXP= ', str(math.exp(p1[2])), ', JITTER = ',\n                  str(math.exp(p1[0])))\n        hp = {'amp': math.exp(p1[1]), 'sqexp': math.exp(p1[2]), 'jitter': math.exp(p1[0])}\n\n    gaussproc.set_parameter_vector(p1)\n    gaussproc.compute(x, yerr)\n    # report the fitted white-noise term, not the initial guess\n    jitter = math.exp(p1[0])\n\n    if verbose:\n        print(\"Final ln-likelihood: {0:.2f}\".format(gaussproc.log_likelihood(y)))\n    mu_zero, var = gaussproc.predict(y, x, return_var=True)\n    mu = mu_zero + 1\n\n    kernel_dict = {'name':kernel, 'hyperparameters':hp}\n\n    if plot:\n        plt.figure(figsize=figsize)\n        plt.errorbar(x, y+1, yerr=yerr, fmt=\".k\", capsize=0,zorder=0, alpha=0.3)\n\n        if x_pred is not None:\n            pred_mu, pred_var = gaussproc.predict(y, x_pred, return_var=True)\n            y_pred = pred_mu + 1\n            yerr_pred = np.sqrt(pred_var)\n        else:\n            x_pred = x\n            y_pred = mu\n            yerr_pred = np.sqrt(var)\n\n        plt.fill_between(x_pred, y_pred - yerr_pred, y_pred + yerr_pred,\n                         color=\"orange\", alpha=0.5,zorder=2)\n        plt.plot(x_pred, y_pred, \"orange\", lw=1.5, alpha=0.8,zorder=2)\n        plt.xlabel(\"Time\")\n        plt.ylabel(\"Relative Flux\")\n        plt.show()\n        plt.close()\n\n    # except Exception as e:\n    #     print(e)\n    #     return [],0, 0, {}\n    og_mu, og_var = gaussproc.predict(y, x_original, return_var=True)\n\n    return mu, var, jitter, og_mu, og_var, kernel_dict, gaussproc","repo_name":"catrionamurray/occultence","sub_path":"occultence/lightcurve_detrending/gp.py","file_name":"gp.py","file_ext":"py","file_size_in_byte":5656,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"13255902542","text":"# pylint: disable=line-too-long\n\nimport streamlit as st\nfrom dotenv import load_dotenv\nfrom PIL import Image\nfrom streamlit_chat import message\n\nfrom agents.agent import create_agent\n\nload_dotenv('.env')\n\nagent = create_agent()\n\n\ndef display_header_and_image():\n    \"\"\"\n    Displays the header information for the chatbot and an image.\n    \"\"\"\n    st.subheader('Chatbot powered by Langchain, ChatGPT, Chroma DB, and Streamlit')\n    image = Image.open('images/chatbot_architecture.png')\n    st.image(\n        image,\n        caption='Chatbot Architecture',\n        use_column_width=True,\n    )\n\n\ndef initialize_session():\n    \"\"\"\n    Initializes or resets session variables.\n    \"\"\"\n    if 'responses' not in st.session_state:\n        st.session_state['responses'] = ['How can I assist you?']\n    if 'requests' not in st.session_state:\n        st.session_state['requests'] = []\n\n\ndef display_chat_history():\n    \"\"\"\n    Displays the chat history.\n    \"\"\"\n    for i, response in enumerate(st.session_state['responses']):\n        message(response, key=str(i))\n        if i < len(st.session_state['requests']):\n            message(st.session_state['requests'][i], is_user=True, key=str(i) + '_user')\n\n\ndef main():\n    display_header_and_image()\n    initialize_session()\n    # container for chat history\n    response_container = st.container()\n    #
container for text box\n textcontainer = st.container()\n\n with textcontainer:\n query = st.text_input('Prompt: ', placeholder='Enter your prompt here..')\n if query:\n with st.spinner('Generating Response...'):\n result = agent(query)\n st.session_state.requests.append(query)\n\n st.session_state.responses.append(result['output'])\n\n with response_container:\n display_chat_history()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nirbar1985/compibot","sub_path":"chatbot_ui.py","file_name":"chatbot_ui.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"11151252004","text":"import os\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport cv2 as cv\nimport random\nimport pickle\nimport sys\nimport datetime\n# sys.path.insert(0, 'Z:\\Im2Flow\\two-stream')\n\n\ndef dump(a, path):\n f = open(path, 'wb')\n pickle.dump(a, f)\n f.close()\ndef load(path):\n f = open(path, 'rb')\n a = pickle.load(f)\n f.close()\n return a\n\n# print(torch.cuda.is_available())\n# for i in range(torch.cuda.device_count()):\n# print(torch.cuda.get_device_name(i))\n\n\nclass make_dataset():\n def __init__(self, data_root='Z:\\\\UCF101\\\\UCF-101'):\n self.data_root = data_root\n # self.len = sample_num\n self.all_data = {} # key is index of a frame 0-2484199, value is a tuple (pathid, i_th frame, label)\n # path = self.flist[pathid]\n self.labelDict = {} # key is name of directory, such as 'RopeClimbing', value is label\n self.flist = []\n for root, dirs, files in os.walk(self.data_root):\n if len(dirs) == 0:\n self.flist += [os.path.join(root, file) for file in files]\n else:\n for label, name in enumerate(dirs):\n self.labelDict[name] = label\n self.labelDict['HandStandPushups'] = self.labelDict['HandstandPushups'] # name inconsistent in UCF101\n total_frame_num = 0\n for idx, f in enumerate(self.flist):\n # print(idx, len(self.all_data))\n video = cv.VideoCapture(f)\n frame_num = int(video.get(cv.CAP_PROP_FRAME_COUNT))\n for frame_idx, i in enumerate(range(total_frame_num, total_frame_num+frame_num-10)):\n self.all_data[i] = (idx, frame_idx, self.labelDict[f.split('v_')[1].split('_g')[0]])\n total_frame_num += frame_num-10\n \n dump(self.flist, 'flist.list')\n dump(self.all_data, 'all_data.dict')\n dump(self.labelDict, 'labelDict.dict')\n \n def train_test_split(self, train_num=100000, test_num=50000):\n self.sampled_data_list = random.sample(range(len(self.all_data)), train_num+test_num)\n self.train_data = [self.all_data[i] for i in self.sampled_data_list[:train_num]]\n self.test_data = [self.all_data[i] for i in self.sampled_data_list[train_num:]]\n timestamp = datetime.datetime.now().strftime('%y.%m.%d.%H.%M.%S')\n dump(self.train_data, 'train_data_'+str(train_num)+'_'+timestamp+'.dict')\n dump(self.test_data, 'test_data_'+str(test_num)+'_'+timestamp+'.dict')\n \n\nclass UCF101(Dataset):\n def __init__(self, mode='train', sample_num=100000, loadName=''):\n self.flist = load('flist.list')\n self.labelDict = load('labelDict.dict')\n all_data = load('all_data.dict')\n if loadName != '':\n self.data = load(loadName)\n print('Loading train test split from %s' % loadName)\n else:\n sampled_data_list = random.sample(range(len(all_data)), sample_num)\n self.data = [all_data[i] for i in sampled_data_list]\n \n def __len__(self):\n return 
len(self.data)\n    \n    def __getitem__(self, index):\n        tup = self.data[index]\n        img = cv.VideoCapture(self.flist[tup[0]])\n        # print(self.flist[tup[0]], tup[1])\n        img.set(cv.CAP_PROP_POS_FRAMES, tup[1])\n        _, img = img.read()\n        img = transforms.ToTensor()(img)\n        img = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(img)\n        # label = torch.zeros(101)\n        # label[tup[2]] = 1\n        label = tup[2]\n        return img, label","repo_name":"Olafyii/two-stream","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12940333345","text":"\"\"\"\nseek.py - file offsets\n\"\"\"\n# a newly opened file's offset starts at the beginning of the file\nf=open(\"test\",\"wb+\")\nf.write(\"hello world\\n\".encode())\nf.close()\nf=open(\"test\",\"rb+\")\nprint(f.tell())\nf.seek(-5,2)# relative to the end of the file (whence=2), move back 5 bytes\nprint(f.read())","repo_name":"zlz2013/zlz","sub_path":"network_base/week01/day04/seek.py","file_name":"seek.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"3722916447","text":"from responses import *\n\n# Variables\nuser_answer = \"\"\nquestion_mapping = {'1': [q1, q1_answer], '2': [q2, q2_answer], '3': [q3, q3_answer]}\n\n# Introduction\nprint(opener, end = \"\\n\\n\")\n\n# Main Menu with Questions\ndef main_menu():\n    print(menu, end = \"\\n\\n\")\n    user_answer = input(menu_options)\n    # Invalid input error handling\n    while user_answer not in ['1', '2', '3', 'exit']:\n        print(\"Invalid input:\")\n        user_answer = input(menu_options)\n\n    if user_answer == \"exit\":\n        quit()\n\n    handle_question(user_answer)\n\n\n# Helper function to answer questions\ndef handle_question(q):\n    question, answer = question_mapping[q]\n    print()\n    print(question + '\\n' + sep + '\\n' + answer + '\\n' + sep + '\\n')\n    user_answer = input(\"Return to main menu?
[y/n]: \")\n if user_answer.lower() == 'y':\n print()\n main_menu()\n else:\n quit()\n\nmain_menu()\n\n","repo_name":"Saltedzz/ptc_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7088749448","text":"import sqlite3\r\n\r\ncon = sqlite3.connect('info.db')\r\ncur = con.cursor()\r\n\r\ncur.execute(\"CREATE TABLE IF NOT EXISTS students (name, age, job);\")\r\n\r\nstudents = [\r\n {\"name\": \"sajad\", \"age\": \"22\", \"job\": \"backend dev\"},\r\n {\"name\": \"amir\", \"age\": \"15\", \"job\": \"db admin\"},\r\n {\"name\": \"sarina\", \"age\": \"16\", \"job\": \"frontend dev\"},\r\n {\"name\": \"elisa\", \"age\": \"16\", \"job\": \"project manager\"},\r\n {\"name\": \"zahra\", \"age\": \"23\", \"job\": \"frontend dev\"},\r\n {\"name\": \"ali\", \"age\": \"17\", \"job\": \"backend dev\"},\r\n]\r\n\r\n\r\ndef save(student):\r\n query = f\"INSERT INTO students VALUES {(student['name'], student['age'], student['job'])};\"\r\n cur.execute(query)\r\n con.commit()\r\n # print(f\"Saved student {student['name']}\")\r\n\r\n\r\ndef show():\r\n cur.execute(\"SELECT * FROM students;\")\r\n records = cur.fetchall()\r\n for i in records:\r\n print(i)\r\n\r\n\r\ndef clear():\r\n cur.execute(\"DELETE FROM students;\")\r\n\r\n\r\n# for i in students:\r\n# save(i)\r\n\r\nshow()\r\n\r\ncon.commit()\r\ncon.close()\r\nprint('Done')\r\n","repo_name":"ThePythonist/Pishgaman-402-C","sub_path":"126.py","file_name":"126.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7235539968","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.font import Font\nfrom tkinter.ttk import Combobox\n\nfrom Database.db import Database\ndb = Database('Database/Database.db')\n\n\n# Class used for creating small window where date format is chosen\nclass DateFormatSettings:\n def __init__(self, app):\n self.format = None\n self.app = app\n self.window = Toplevel(self.app)\n self.open_settings_window()\n\n def save_settings(self):\n if db.get_settings():\n db.update_settings(1, self.format.get())\n else:\n db.add_settings(self.format.get())\n messagebox.showinfo(title=\"Success\", message=\"Saved!\")\n self.window.destroy()\n\n def open_settings_window(self):\n self.window.title(\"Settings\")\n self.window.resizable(0, 0)\n self.window['bg'] = '#778899'\n\n width = self.app.winfo_screenwidth()\n height = self.app.winfo_screenheight()\n self.window.geometry('%dx%d+%d+%d' % (300, 150, width // 2 - 150, height // 2 - 75))\n\n font = Font(family=\"Bookman Old Style\", size=16)\n text = Text(self.window, bg=\"lightgrey\", font=font, height=1)\n text.insert(INSERT, \"Select date format\")\n text.config(state=DISABLED)\n\n self.format = Combobox(self.window, width=27, state='readonly', text=\"Select format:\")\n self.format.bind(\"<>\", lambda e: self.window.focus())\n self.format['values'] = ('InvoiceNo/Month/Year', 'InvoiceNo/Year', 'InvoiceNo/Month')\n self.format.current(0)\n\n save = Button(self.window, text=\"Save\", command=lambda: self.save_settings())\n\n text.pack(padx=20, pady=10)\n self.format.pack()\n 
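# the Save button persists the chosen format through save_settings()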
save.pack(pady=20)\n","repo_name":"DawidGrapa/Invoice-inator","sub_path":"PopUpWindows/dateFormatSettings.py","file_name":"dateFormatSettings.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"11477107677","text":"import numpy as np\nimport cvxopt as opt\nfrom cvxopt import blas, solvers\n\ndef rand_weights(n):\n k = np.random.rand(n)\n return k / sum(k)\n\n\ndef random_portfolio(returns):\n ''' \n Returns the mean and standard deviation of returns for a random portfolio\n '''\n\n p = np.asmatrix(np.mean(returns, axis=1))\n w = np.asmatrix(rand_weights(returns.shape[0]))\n C = np.asmatrix(np.cov(returns))\n \n mu = w * p.T\n sigma = np.sqrt(w * C * w.T)\n \n if sigma > 2:\n return random_portfolio(returns)\n return mu, sigma\n\n\n\n\ndef convert_portfolios(portfolios):\n port_list = []\n for portfolio in portfolios:\n temp = np.array(portfolio).T\n port_list.append(temp[0].tolist())\n \n return port_list\n\n\ndef optimal_portfolio(returns):\n n = len(returns)\n returns = np.asmatrix(returns)\n \n N = 100\n mus = [10**(5.0 * t/N - 1.0) for t in range(N)]\n \n S = opt.matrix(np.cov(returns)) #S is the covariance matrix. diagonal is the variance of each stock\n pbar = opt.matrix(np.mean(returns, axis=1))\n \n # Create constraint matrices\n G = -opt.matrix(np.eye(n)) # negative n x n identity matrix\n h = opt.matrix(0.0, (n ,1))\n A = opt.matrix(1.0, (1, n))\n b = opt.matrix(1.0)\n \n # Calculate efficient frontier weights using quadratic programming\n portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] \n for mu in mus]\n\n port_list = convert_portfolios(portfolios)\n \n \n returns = [blas.dot(pbar, x) for x in portfolios]\n risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios] \n \n m1 = np.polyfit(returns, risks, 2)\n x1 = np.sqrt(m1[2] / m1[0])\n wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] #Is this the tangency portfolio? X1 = slope from origin? 
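# (it is: fitting risk = a*r**2 + b*r + c, a line through the origin is tangent where c = a*r**2, i.e. r = sqrt(c/a) = x1)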
\n return np.asarray(wt), returns, risks, port_list\n","repo_name":"itomin/fiancee","sub_path":"efficient_frontier.py","file_name":"efficient_frontier.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12028751355","text":"\nfrom PyQt6 import QtWidgets, uic\nfrom pyqtgraph import PlotWidget\nimport pyqtgraph as pg\nimport sys\n\n\ndef main() -> None:\n app = QtWidgets.QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec())\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n\n uic.loadUi(\"placeholder_ex.ui\", self)\n self.plot([1, 2, 3, 4, 5,6, 7, 8, 9,10], [30, 32, 34, 32, 33, 31, 29, 32 ,35, 45])\n\n def plot(self, hour, temp):\n self.graphWidget.plot(hour, temp)\n \n\nif __name__==\"__main__\":\n main()\n\n\n","repo_name":"CarriageStart/program_etc","sub_path":"pyqt6/basics/ui_etc/designer/placeholder_ex.py","file_name":"placeholder_ex.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13532469595","text":"def solution(gems):\n answer = []\n allGems = set(gems)\n size = len(allGems)\n\n while size <= len(gems):\n for i in range(0, len(gems) - size + 1):\n sliceGems = gems[i:i + size]\n if set(sliceGems) == allGems:\n return [i + 1, i + size]\n size += 1\n\n return answer\n","repo_name":"eunna-lim/algorithm_study","sub_path":"eunna/level3/python3/67258.py","file_name":"67258.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15009900894","text":"import numpy as np\nimport pytest\n\nfrom pararealml.boundary_condition import (\n CauchyBoundaryCondition,\n ConstantBoundaryCondition,\n ConstantFluxBoundaryCondition,\n ConstantValueBoundaryCondition,\n DirichletBoundaryCondition,\n NeumannBoundaryCondition,\n vectorize_bc_function,\n)\n\n\ndef test_dirichlet_boundary_condition():\n bc = DirichletBoundaryCondition(lambda x, t: t * x)\n assert not bc.is_static\n assert bc.has_y_condition\n assert not bc.has_d_y_condition\n\n assert np.allclose(\n bc._y_condition(np.ones((2, 1)), 5.0), np.full((2, 1), 5.0)\n )\n\n with pytest.raises(RuntimeError):\n bc.d_y_condition(np.ones((2, 1)), 5.0)\n\n\ndef test_neumann_boundary_condition():\n bc = NeumannBoundaryCondition(\n lambda x, t: np.zeros((len(x), 2)), is_static=True\n )\n assert bc.is_static\n assert not bc.has_y_condition\n assert bc.has_d_y_condition\n\n assert np.allclose(\n bc.d_y_condition(np.ones((7, 3)), 5.0), np.zeros((7, 2))\n )\n\n with pytest.raises(RuntimeError):\n bc.y_condition(np.ones((7, 1)), 5.0)\n\n\ndef test_cauchy_boundary_condition():\n bc = CauchyBoundaryCondition(lambda x, t: t * x, lambda x, t: x**2 - t)\n assert not bc.is_static\n assert bc.has_y_condition\n assert bc.has_d_y_condition\n\n assert np.allclose(\n bc.y_condition(np.full((4, 5), 2.0), 7.0), np.full((4, 5), 14.0)\n )\n\n assert np.allclose(\n bc.d_y_condition(np.full((5, 2), 2.0), 5.0), np.full((5, 2), -1.0)\n )\n\n\ndef test_constant_boundary_condition_with_both_conditions_none():\n with pytest.raises(ValueError):\n ConstantBoundaryCondition(None, None)\n\n\ndef test_constant_boundary_condition():\n bc = ConstantBoundaryCondition([1.0, None], [None, -1.0])\n assert bc.is_static\n assert bc.has_y_condition\n assert 
bc.has_d_y_condition\n\n y_condition = bc.y_condition(np.full((4, 5), 2.0), 7.0)\n assert y_condition.shape == (4, 2)\n assert np.all(y_condition[:, 0] == 1.0)\n assert np.all([y_value is None for y_value in y_condition[:, 1]])\n\n d_y_condition = bc.d_y_condition(np.full((5, 2), 2.0), 5.0)\n assert d_y_condition.shape == (5, 2)\n assert np.all([d_y_value is None for d_y_value in d_y_condition[:, 0]])\n assert np.all(d_y_condition[:, 1] == -1.0)\n\n\ndef test_constant_value_boundary_condition():\n bc = ConstantValueBoundaryCondition([5.0])\n\n assert bc.is_static\n assert bc.has_y_condition\n assert not bc.has_d_y_condition\n\n y_condition = bc.y_condition(np.full((3, 1), 2.0), 7.0)\n assert y_condition.shape == (3, 1)\n assert np.all(y_condition == 5.0)\n\n with pytest.raises(RuntimeError):\n bc.d_y_condition(np.ones((2, 1)), 0.0)\n\n\ndef test_constant_flux_boundary_condition():\n bc = ConstantFluxBoundaryCondition([-3.0])\n\n assert bc.is_static\n assert not bc.has_y_condition\n assert bc.has_d_y_condition\n\n d_y_condition = bc.d_y_condition(np.full((5, 3), 12.0), 2.0)\n assert d_y_condition.shape == (5, 1)\n assert np.all(d_y_condition == -3.0)\n\n with pytest.raises(RuntimeError):\n bc.y_condition(np.ones((4, 2)), 0.0)\n\n\ndef test_vectorize_bc_function():\n vectorized_function = vectorize_bc_function(\n lambda x, t: (x[0], -1.0, 2 * x[1], None, t**2)\n )\n input_x = np.arange(12).reshape(6, 2)\n input_t = 3.0\n expected_output = [\n [0.0, -1.0, 2.0, np.nan, 9.0],\n [2.0, -1.0, 6.0, np.nan, 9.0],\n [4.0, -1.0, 10.0, np.nan, 9.0],\n [6.0, -1.0, 14.0, np.nan, 9.0],\n [8.0, -1.0, 18.0, np.nan, 9.0],\n [10.0, -1.0, 22.0, np.nan, 9.0],\n ]\n output = vectorized_function(input_x, input_t)\n assert np.array_equal(output, expected_output, equal_nan=True)\n","repo_name":"ViktorC/PararealML","sub_path":"tests/test_boundary_condition.py","file_name":"test_boundary_condition.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"81"} +{"seq_id":"73017363464","text":"\"\"\"\nAuthor: Joshua Ashkinaze\nDate: 2023-12-04\n\nDescription: Scrapes fiction book descriptions from FictionDB\n\"\"\"\n\nimport argparse\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom joblib import Parallel, delayed\nfrom datetime import datetime\nimport time\nimport random\nimport ftfy\nimport logging\nimport os\n\nLOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s'\nlogging.basicConfig(filename=f'{os.path.basename(__file__)}.log', level=logging.INFO, format=LOG_FORMAT, datefmt='%Y-%m-%d %H:%M:%S', filemode='w')\n\n# Function to generate URLs for each month from 2019 to 2023\ndef generate_urls(start_date, end_date):\n start_year, start_month, _ = map(int, start_date.split('-'))\n end_year, end_month, _ = map(int, end_date.split('-'))\n months = [\"jan\", \"feb\", \"mar\", \"apr\", \"may\", \"jun\", \"jul\", \"aug\", \"sep\", \"oct\", \"nov\", \"dec\"]\n urls = []\n\n for year in range(start_year, end_year + 1):\n start_m = start_month if year == start_year else 1\n end_m = end_month if year == end_year else 12\n for m in range(start_m, end_m + 1):\n month = months[m - 1]\n url = f\"https://www.fictiondb.com/new-releases/new-books-by-month.htm?date={month}-{year}\"\n urls.append(url)\n return urls\n\n# Function to get book description from its link\ndef get_book_description(link):\n try:\n headers = {'User-Agent': 'Mozilla/5.0'}\n response = requests.get(link, headers=headers)\n if response.status_code == 200:\n 
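# the description text lives in the active 'description' tab of the book page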
soup = BeautifulSoup(response.text, 'html.parser')\n description_tag = soup.find('div', {'class': 'tab-pane fade show active', 'id': 'description'})\n if description_tag:\n description = description_tag.get_text(strip=True)\n return ftfy.fix_encoding(description) # Fixing encoding issues\n return None\n except Exception as e:\n logging.info(f\"Error fetching book description: {e}\")\n return None\n\n# Function to scrape books for a specific month\ndef scrape_books_for_month(url):\n logging.info(f\"Scraping {url}\")\n try:\n headers = {'User-Agent': 'Mozilla/5.0'}\n response = requests.get(url, headers=headers)\n if response.status_code != 200:\n logging.info(f\"Failed to fetch {url}\")\n return []\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n book_rows = soup.find_all(\"tr\", class_=[\"g\", \"p\", \"r\"])\n books = []\n\n for row in book_rows:\n book = {}\n book[\"author\"] = row.find(\"a\", itemprop=\"author\").get_text(strip=True)\n book_details_tag = row.find(\"a\", itemprop=\"url\")\n book[\"title\"] = book_details_tag.find(\"span\", itemprop=\"name\").get_text(strip=True)\n book[\"link\"] = book_details_tag['href'].replace(\"..\", \"https://www.fictiondb.com/\")\n book[\"genre\"] = row.find(\"span\", itemprop=\"genre\").get_text(strip=True)\n book[\"date\"] = row.find(\"span\", itemprop=\"datePublished\").get_text(strip=True)\n series_tag = row.find(\"td\", class_=\"d-none d-xl-table-cell\")\n book[\"series\"] = series_tag.get_text(strip=True) if series_tag else None\n book[\"description\"] = get_book_description(book['link'])\n books.append(book)\n\n if len(books) % 10 == 0:\n rand_sleep = random.random()*3\n time.sleep(rand_sleep)\n\n return books\n\n except Exception as e:\n logging.info(f\"Error scraping {url}: {e}\")\n return []\n\ndef main():\n parser = argparse.ArgumentParser(description='Scrape FictionDB for book descriptions.')\n parser.add_argument('--start_date', default=\"2018-01-01\", type=str, help='Start date in YYYY-MM-DD format')\n parser.add_argument('--end_date', default=\"2023-01-01\", type=str, help='End date in YYYY-MM-DD format')\n parser.add_argument('--d', action='store_true', help='Debug mode: scrape only one page')\n\n args = parser.parse_args()\n\n logging.info(f\"Scraping FictionDB with parameters {str(args)}\")\n\n urls = generate_urls(args.start_date, args.end_date)\n\n if args.d:\n urls = urls[:1] # In debug mode, scrape only the first URL\n\n results = Parallel(n_jobs=-1)(delayed(scrape_books_for_month)(url) for url in urls)\n all_books = [book for monthly_books in results for book in monthly_books]\n df = pd.DataFrame(all_books)\n df.to_csv(f\"{args.start_date}_{args.end_date}_fiction.csv\", index=False)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"josh-ashkinaze/ood","sub_path":"fetch_fiction.py","file_name":"fetch_fiction.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24492358029","text":"'''\nModule which contains the user defined colormaps\n\nContains:\n---------\nshow_all_colormaps : print all colormaps in a figure\njetmod : colormap which contains no yellow\njetext : extension to jet which starts at black and ends at white\nbinary : red (False) and green (True) colormap without intermediate colors\ntraffic_light : red, yellow, green colormap\n\n# private functions\n_gen_cmap_output : support function which creates an actual Colormap object.\n\nSee Also:\n---------\nmatplotlib.colors : a submodule containing stuff 
on colors and colormaps\nmatplotlib.colors.LinearSegmentedColormap : creates a colormap object from a 3x1 matrix of values\n'''\n\nimport numpy as np\nfrom matplotlib.colors import LinearSegmentedColormap, Normalize\nfrom numpy import ma\n\n__all__ = ['PercNorm', 'MaxContrastNorm', 'show_all_colormaps', 'jetmod', 'jetext',\n 'traffic_light', 'binary']\n\n\n# generate output based on inputs.. colormaps are defined per marker_array\nclass PercNorm(Normalize):\n '''\n Normalize a given value to the 0-1 range depending on a percentile bracket\n\n That is, the vmin and vmax values are calculated based on the percentiles of the data content\n\n *PercNorm* is a child of *matplotlib.colors.Normalize*, and is a small wrapper around this to\n calculate the vmin and vmax based on the percentiles of the data content.\n '''\n\n def __init__(self, pmin=0, pmax=100, clip=False):\n '''\n Keyword Arguments:\n ------------------\n pmin : (0) int or float\n The bottom of the percentile brackets in percentages (from 0 to 100)\n pmax : (100) int or float\n The top of the percentile bracket in percentages (from 0 to 100)\n clip : (False) bool\n is passed to *matplotlib.colors.Normalize*. Check this for more information\n\n '''\n self.pmin = pmin\n self.pmax = pmax\n\n # call init in superclass\n super().__init__(vmin=None, vmax=None, clip=clip)\n\n def __call__(self, value, clip=None):\n '''\n A call to calculate the colormap based on data/value\n\n Argument:\n ---------\n value : ndarray of scalars, or scalar\n the data of which the image is made up. Generally for colormaps this is a 2D set\n clip : See *matplotlib.colors.Normalize* for more information\n\n Returns:\n --------\n result : see *matplotlib.colors.Normalize* for more information\n '''\n\n result, is_scalar = self.process_value(value)\n\n # set vmin, vmax based on pmin and pmax\n self.vmin = np.nanpercentile(result.ravel(), self.pmin)\n self.vmax = np.nanpercentile(result.ravel(), self.pmax)\n\n return super().__call__(value, clip=clip)\n\n\nclass MaxContrastNorm(Normalize):\n '''\n normalize a given value to the 0-1 range depending on data densities\n '''\n\n # matched normalization\n def __call__(self, value, clip=None):\n \"\"\"\n Normalize *value* data in the ``[vmin, vmax]`` interval into\n the ``[0.0, 1.0]`` interval and return it. *clip* defaults\n to *self.clip* (which defaults to *False*). If not already\n initialized, *vmin* and *vmax* are initialized using\n *autoscale_None(value)*.\n \"\"\"\n if clip is None:\n clip = self.clip\n\n result, is_scalar = self.process_value(value)\n\n # set vmin, vmax in case they are still None\n self.autoscale_None(result)\n vmin, vmax = self.vmin, self.vmax\n if vmin == vmax:\n result.fill(0) # Or should it be all masked? Or 0.5?\n elif vmin > vmax:\n raise ValueError(\"minvalue must be less than or equal to maxvalue\")\n else:\n vmin = float(vmin)\n vmax = float(vmax)\n if clip:\n mask = ma.getmask(result)\n result = ma.array(np.clip(result.filled(vmax), vmin, vmax),\n mask=mask)\n # ma division is very slow; we can take a shortcut\n # use np.asarray so data passed in as an ndarray subclass are\n # interpreted as an ndarray. 
See issue #6622.\n resdat = np.asarray(result.data)\n\n # give sorting index\n isort = np.unique(resdat.ravel(), return_inverse=True)[1]\n print(isort)\n print(isort.min(), isort.max())\n resdat = isort.reshape(*resdat.shape).astype(float)\n resdat -= resdat.min()\n resdat /= resdat.max()\n print(resdat.dtype)\n result = np.ma.array(resdat.astype(float), mask=result.mask, copy=False)\n print(resdat.dtype)\n if is_scalar:\n result = result[0]\n\n print(np.unique(result))\n print(result.dtype)\n return result\n\n\ndef show_all_colormaps():\n '''\n Shows all colormaps in an - unblocked - figure\n\n Author:\n -------\n Joris Kampman, Thales NL, 2017\n '''\n\n import matplotlib.pyplot as plt\n\n nof_steps = 256\n cmaps_to_show = ['jetmod', 'jetext', 'traffic_light', 'binary']\n # cmaps_to_show = ['jetmod']\n\n mat = np.linspace(0, 1, nof_steps).reshape((1, -1))\n mat = np.vstack((mat, mat))\n\n fig, axs = plt.subplots(len(cmaps_to_show), 4, num='All defined subplots',\n subplot_kw=dict(xticks=[], yticks=[]))\n\n for iax, cmap_str in enumerate(cmaps_to_show):\n # axs.append(plt.subplot(gs[iax, 0]))\n cmap = eval('{:s}(nof_steps=nof_steps, interpolation=\"linear\")'.format(cmap_str))\n cmapnnb = eval('{:s}(nof_steps=nof_steps, interpolation=\"nearest\")'.format(cmap_str))\n cmapi = eval('{:s}(nof_steps=nof_steps, interpolation=\"linear\", invert=True)'.format(cmap_str))\n cmapn = eval('{:s}(nof_steps=nof_steps, invert=False, negative=True, interpolation=\"linear\")'\n .format(cmap_str))\n\n axs[iax, 0].imshow(mat, cmap=cmap, aspect='auto')\n axs[iax, 0].set_title(cmap_str)\n axs[iax, 1].imshow(mat, cmap=cmapi, aspect='auto')\n axs[iax, 1].set_title(cmap_str + '(inverted)')\n axs[iax, 2].imshow(mat, cmap=cmapn, aspect='auto')\n axs[iax, 2].set_title(cmap_str + '(negative)')\n axs[iax, 3].imshow(mat, cmap=cmapnnb, aspect='auto')\n axs[iax, 3].set_title(cmap_str + '(markers only)')\n\n plt.show(block=False)\n\n return None\n\n\ndef _gen_cmap_output(marker_array, nof_steps, istep, invert=False, negative=False,\n interpolation='linear', name=None):\n '''\n Creates an actual colormap object via matplotlib.colors.LinearSegmentedColormap\n\n Arguments:\n ----------\n marker_array : ndarray of float 3-tuples\n contains the rgb tuples of the markers\n nof_steps : int\n Number of steps in the colormap\n istep : {int, None, 'vector'}\n Main switch to indicate what to return:\n - an integer, results in a single RGB 3-tuple to be returned\n - None, results in the creation of a colormap object which is returned\n - 'vector', results in the return of a Nx3 ndarray of all RGB colors in the colormap\n invert : bool [optional]\n Whether to invert the colormap\n negative : bool [optional]\n Whether to take the negative of the specified colors\n interpolation : {'linear', 'nearest'}\n 'linear' implies linear interpolation between markers\n 'nearest' implies no interpolation and only the marker colors are valid colors\n name : str or None [optional]\n The name of the colormap to create\n\n Returns:\n --------\n In case istep=None, the function returns a colormapobject via the function matplotlib.colors.\n LinearSegmentedColormap function.\n\n In case \"istep\" is an integer the color at step \"istep\" is returned as an 3-tuple of rgb values\n\n In case istep='vector', the 3xN ndarray with rgb values is returned, giving the full set of\n colors, with N is \"nof_steps\"\n\n See Also:\n ---------\n matplotlib.colors : submodule containing information on colors and colormaps\n matplotlib.colors.LinearSegmentedColormap : 
creates a colormap object from a RGB input\n '''\n\n # check if interpolation is nearest, then create double markers to prevent interpolation\n if interpolation == 'nearest':\n nof_markers = marker_array.shape[0]\n marker_array_new = np.zeros(((nof_markers - 1)*3 + 1, 4), dtype=np.float)\n marker_array_new[[0, -1], :] = marker_array[[0, -1], :]\n\n for imarker in range(1, nof_markers):\n inew = imarker*3 + np.array([-2, -1, 0], dtype=np.int)\n rel_pos = (marker_array[imarker, 0] + marker_array[imarker-1, 0])/2\n marker_array_new[inew[0], 0] = rel_pos - 0.0001\n marker_array_new[inew[1], 0] = rel_pos + 0.0001\n marker_array_new[inew[0], 1:] = marker_array[imarker-1, 1:]\n marker_array_new[inew[1], 1:] = marker_array[imarker, 1:]\n marker_array_new[inew[2], :] = marker_array[imarker, :]\n\n marker_array = marker_array_new\n\n # if linear -> no action\n elif interpolation == 'linear':\n pass\n\n # else: unknown value for keyword \"interpolation\"\n else:\n raise ValueError('[keyword error] \"interpolation=\\'{}\\'\" is not a valid value'\n .format(interpolation))\n\n # invert colormap order (e.g., b -> w becomes w -> b)\n if invert:\n # flip up/down\n marker_array = np.flipud(marker_array)\n # change first column\n marker_array[:, 0] = 1 - marker_array[:, 0]\n\n # take the negative of the colors (e.g., [1, 1, 0] becomes [0, 0, 1])\n if negative:\n marker_array[:, 1:] = 1.0 - marker_array[:, 1:]\n\n # =================================================\n # switch based on what keyword \"istep\" is ...\n\n # istep = None -> return colormap object\n if istep is None: # create in colormap format\n cdict = dict(red=[], green=[], blue=[])\n for row in marker_array:\n cdict['red'].append((row[0], row[1], row[1]))\n cdict['green'].append((row[0], row[2], row[2]))\n cdict['blue'].append((row[0], row[3], row[3]))\n\n return LinearSegmentedColormap(name, cdict, nof_steps)\n\n # istep = 'vector' -> create Nx3 matrix with RGB 3-tuples\n elif istep == 'vector':\n return_vector = np.zeros((nof_steps, 3), dtype=np.float)\n for istep in range(nof_steps):\n posi = np.linspace(0, 1, nof_steps)[istep]\n redi = np.interp(posi, marker_array[:, 0], marker_array[:, 1])\n greeni = np.interp(posi, marker_array[:, 0], marker_array[:, 2])\n bluei = np.interp(posi, marker_array[:, 0], marker_array[:, 3])\n\n return_vector[istep, :] = [redi, greeni, bluei]\n\n return return_vector\n\n # istep is an integer -> give single RGB 3-tuple\n elif type(istep) == int:\n # do interpolation and pick index\n posi = np.linspace(0, 1, nof_steps)[istep]\n redi = np.interp(posi, marker_array[:, 0], marker_array[:, 1])\n greeni = np.interp(posi, marker_array[:, 0], marker_array[:, 2])\n bluei = np.interp(posi, marker_array[:, 0], marker_array[:, 3])\n\n return (redi, greeni, bluei)\n\n # else: exception\n else:\n raise ValueError('[keyword error] The value \"istep\"=\\'{}\\' is not valid'.format(istep))\n\n\ndef jetmod(nof_steps=256, istep=None, bright=False, invert=False, negative=False,\n interpolation='linear'):\n '''\n Modified 'jet' colormap. 
Yellow is removed for types like Geert Onstenk ...\n\n Arguments:\n ----------\n nof_steps : int [optional]\n The number of colors in the colormap\n istep : {int, None, 'vector'} [optional]\n Main switch to indicate what to return:\n - an integer, results in a single RGB 3-tuple to be returned\n - None, results in the creation of a colormap object which is returned\n - 'vector', results in the return of a Nx3 ndarray of all RGB colors in the colormap\n bright : bool [optional]\n Whether to have bright varieties of red and blue at the endings or the default dark ones\n invert : bool [optional]\n Whether to invert the colormap\n negative : bool [optional]\n Whether to take the negative of the specified colors\n interpolation : {'linear', 'nearest'}\n 'linear' implies linear interpolation between markers\n 'nearest' implies no interpolation and only the marker colors are valid colors\n\n Returns:\n --------\n In case \"istep=None\", the function returns a colormapobject via the function matplotlib.colors.\n LinearSegmentedColormap function.\n\n In case \"istep=\", the color at step \"istep\" is returned as an 3-tuple of rgb values\n\n In case \"istep='vector'\", the 3xN ndarray with rgb values is returned, giving the full set of\n colors, with N is \"nof_steps\"\n\n See Also:\n ---------\n matplotlib.colors : submodule containing information on colors and colormaps\n matplotlib.colors.LinearSegmentedColormap : creates a colormap object from a RGB input\n '''\n\n marker_array = np.array([[0, 0, 0, 143],\n [32, 0, 0, 255],\n [76, 0, 255, 255],\n [128, 0, 255, 0],\n [159, 255, 153, 0],\n [223, 255, 0, 0],\n [255, 128, 0, 0]], dtype='float')/255\n\n # if bright then ditch darker edge markers (reduce list by 1 on both sides)\n if bright:\n marker_array = marker_array[slice(1, -1), :]\n # stretch positions\n positions = marker_array[:, 0]\n positions -= positions[0]\n positions /= positions[-1]\n marker_array[:, 0] = positions\n\n return _gen_cmap_output(marker_array, nof_steps, istep, invert, negative, interpolation,\n name='jetmod')\n\n\ndef jetext(nof_steps=256, istep=None, invert=False, negative=False, interpolation='linear'):\n '''\n Modified 'jet' colormap which extends the lower values to black, and the upper values to white\n\n Arguments:\n ----------\n nof_steps : int [optional]\n The number of colors in the colormap\n istep : {int, None, 'vector'} [optional]\n Main switch to indicate what to return:\n - an integer, results in a single RGB 3-tuple to be returned\n - None, results in the creation of a colormap object which is returned\n - 'vector', results in the return of a Nx3 ndarray of all RGB colors in the colormap\n invert : bool [optional]\n Whether to invert the colormap\n negative : bool [optional]\n Whether to take the negative of the specified colors\n interpolation : {'linear', 'nearest'}\n 'linear' implies linear interpolation between markers\n 'nearest' implies no interpolation and only the marker colors are valid colors\n\n Returns:\n --------\n In case \"istep=None\", the function returns a colormapobject via the function matplotlib.colors.\n LinearSegmentedColormap function.\n\n In case \"istep=\", the color at step \"istep\" is returned as an 3-tuple of rgb values\n\n In case \"istep='vector'\", the 3xN ndarray with rgb values is returned, giving the full set of\n colors, with N is \"nof_steps\"\n\n See Also:\n ---------\n matplotlib.colors : submodule containing information on colors and colormaps\n matplotlib.colors.LinearSegmentedColormap : creates a colormap object 
from a RGB input\n    '''\n\n    marker_array = np.array([[0, 0, 0, 0],\n                             [79, 0, 0, 255],\n                             [142, 0, 255, 0],\n                             [173, 255, 255, 0],\n                             [236, 255, 0, 0],\n                             [255, 255, 255, 255]], dtype='float')/255\n\n    return _gen_cmap_output(marker_array, nof_steps, istep, invert, negative, interpolation,\n                            name='jetext')\n\n\ndef traffic_light(nof_steps=256, istep=None, invert=False, negative=False, interpolation='linear'):\n    '''\n    colormap consisting of colors with markers red, yellow and green (like a traffic light)\n\n    Arguments:\n    ----------\n    nof_steps : int [optional]\n        The number of colors in the colormap\n    istep : {int, None, 'vector'} [optional]\n        Main switch to indicate what to return:\n        - an integer, results in a single RGB 3-tuple to be returned\n        - None, results in the creation of a colormap object which is returned\n        - 'vector', results in the return of a Nx3 ndarray of all RGB colors in the colormap\n    invert : bool [optional]\n        Whether to invert the colormap\n    negative : bool [optional]\n        Whether to take the negative of the specified colors\n    interpolation : {'linear', 'nearest'}\n        'linear' implies linear interpolation between markers\n        'nearest' implies no interpolation and only the marker colors are valid colors\n\n    Returns:\n    --------\n    In case \"istep=None\", the function returns a colormap object via the function matplotlib.colors.\n    LinearSegmentedColormap function.\n\n    In case \"istep\" is an integer, the color at step \"istep\" is returned as a 3-tuple of rgb values\n\n    In case \"istep='vector'\", the 3xN ndarray with rgb values is returned, giving the full set of\n    colors, with N is \"nof_steps\"\n\n    See Also:\n    ---------\n    matplotlib.colors : submodule containing information on colors and colormaps\n    matplotlib.colors.LinearSegmentedColormap : creates a colormap object from a RGB input\n    '''\n\n    marker_array = np.array([[0.0, 1, 0, 0],\n                             [0.5, 1, 1, 0],\n                             [1.0, 0, 1, 0]], dtype=float)\n\n    return _gen_cmap_output(marker_array, nof_steps, istep, invert, negative, interpolation,\n                            name='traffic_light')\n\n\ndef binary(nof_steps=2, istep=None, invert=False, negative=False, interpolation='nearest'):\n    '''\n    Colormap for displaying true or false images. consists of two colors and no interpolation done\n\n    Arguments:\n    ----------\n    nof_steps : int [optional]\n        The number of colors in the colormap\n    istep : {int, None, 'vector'} [optional]\n        Main switch to indicate what to return:\n        - an integer, results in a single RGB 3-tuple to be returned\n        - None, results in the creation of a colormap object which is returned\n        - 'vector', results in the return of a Nx3 ndarray of all RGB colors in the colormap\n    invert : bool [optional]\n        Whether to invert the colormap\n    negative : bool [optional]\n        Whether to take the negative of the specified colors\n    interpolation : {'linear', 'nearest'}\n        'linear' implies linear interpolation between markers\n        'nearest' implies no interpolation and only the marker colors are valid colors\n\n    Returns:\n    --------\n    In case \"istep=None\", the function returns a colormap object via the function matplotlib.colors.\n    LinearSegmentedColormap function.\n\n    In case \"istep\" is an integer, the color at step \"istep\" is returned as a 3-tuple of rgb values\n\n    In case \"istep='vector'\", the 3xN ndarray with rgb values is returned, giving the full set of\n    colors, with N is \"nof_steps\"\n\n    See Also:\n    ---------\n    matplotlib.colors : submodule containing information on colors and colormaps\n    matplotlib.colors.LinearSegmentedColormap : creates a colormap object from a RGB input\n    '''\n    marker_array = np.array([[0.0, 1, 0, 0],\n                             [1.0, 0, 1, 0]], dtype=float)\n\n    return _gen_cmap_output(marker_array, nof_steps, istep, invert, negative, interpolation,\n                            name='binary')\n","repo_name":"joriskampman/auxtools","sub_path":"cmaps.py","file_name":"cmaps.py","file_ext":"py","file_size_in_byte":18888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72036220104","text":"#!/usr/bin/python\n\n# This script is to extract Illumina barcodes\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport getopt\nimport csv\n\nimport argparse\nimport glob\nimport re\nfrom subprocess import call\nfrom datetime import datetime\n\nfrom new_submit_to_taskrunner import call_to_taskrunner\nimport traceback\n\n# Get read structure from RunInfo.xml\ndef get_read_structure(x):\n    posts = 'TBT'\n    i = 0\n    str = ''\n    with open(x, 'r') as fin:\n        for line in fin:\n            line = line.strip(' \\t\\n')\n            if (line.startswith('= 3):\n                break;\n    fin.close()\n    return str\n\n\n# Get tile information from RunInfo.xml\ndef get_tiles(x, lane):\n    tiles = []\n    with open(x, 'r') as fin:\n        for line in fin:\n            line = line.strip(' \\t\\n')\n            if (line.startswith('<Tile>', 0)):\n                l = line[6:].split('<')[0]\n                if (l.split('_')[0] == lane):\n                    tiles.append(l.split('_')[1])\n    fin.close()\n    tiles.sort()\n    return tiles\n    \n\n# Convert string to boolean\ndef str2bool(s):\n    return s.lower() == \"true\"\n\n    \n# Write to log file\ndef write_log(log_file, flowcell_barcode, log_string):\n    now = datetime.now()\n    dt_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n    with open(log_file, \"a\") as logfile:\n        logfile.write(dt_string+\" [Slide-seq Flowcell Alignment Workflow - \"+flowcell_barcode+\"]: \"+log_string+\"\\n\")\n    logfile.close()\n    \n\ndef main():\n    if len(sys.argv) != 3:\n        print(\"Please provide two arguments: manifest file and lane ID!\")\n        sys.exit()\n    \n    manifest_file = sys.argv[1]\n    lane = sys.argv[2]\n\n    # Check if the manifest file exists\n    if not os.path.isfile(manifest_file):\n        print(\"File {} does not exist.
Exiting...\".format(manifest_file))\n sys.exit()\n\n # Read manifest file\n options = {}\n with open(manifest_file,\"r\") as fp:\n for line in fp:\n dict = line.rstrip().split(\"=\")\n options[dict[0]] = dict[1]\n fp.close()\n \n flowcell_directory = options['flowcell_directory']\n output_folder = options['output_folder']\n metadata_file = options['metadata_file']\n flowcell_barcode = options['flowcell_barcode']\n \n library_folder = options['library_folder'] if 'library_folder' in options else '{}/libraries'.format(output_folder)\n tmpdir = options['temp_folder'] if 'temp_folder' in options else '{}/tmp'.format(output_folder)\n dropseq_folder = options['dropseq_folder'] if 'dropseq_folder' in options else '/broad/macosko/bin/dropseq-tools'\n picard_folder = options['picard_folder'] if 'picard_folder' in options else '/broad/macosko/bin/dropseq-tools/3rdParty/picard'\n STAR_folder = options['STAR_folder'] if 'STAR_folder' in options else '/broad/macosko/bin/dropseq-tools/3rdParty/STAR-2.5.2a'\n scripts_folder = options['scripts_folder'] if 'scripts_folder' in options else '/broad/macosko/jilong/slideseq_pipeline/scripts'\n is_NovaSeq = str2bool(options['is_NovaSeq']) if 'is_NovaSeq' in options else False\n is_NovaSeq_S4 = str2bool(options['is_NovaSeq_S4']) if 'is_NovaSeq_S4' in options else False\n num_slice_NovaSeq = int(options['num_slice_NovaSeq']) if 'num_slice_NovaSeq' in options else 10\n num_slice_NovaSeq_S4 = int(options['num_slice_NovaSeq_S4']) if 'num_slice_NovaSeq_S4' in options else 40\n email_address = options['email_address'] if 'email_address' in options else ''\n \n basecalls_dir = '{}/Data/Intensities/BaseCalls'.format(flowcell_directory)\n log_file = '{}/logs/workflow.log'.format(output_folder)\n \n # Get read structure from RunInfo.xml\n runinfo_file = '{}/RunInfo.xml'.format(flowcell_directory)\n read_structure = get_read_structure(runinfo_file)\n \n # Get tile information from RunInfo.xml\n slice_id = {}\n slice_first_tile = {}\n slice_tile_limit = {}\n tile_nums = get_tiles(runinfo_file, lane)\n tile_cou = len(tile_nums)\n if ((not is_NovaSeq) and (not is_NovaSeq_S4)):\n slice_id[lane] = ['0']\n slice_first_tile[lane] = [str(tile_nums[0])]\n slice_tile_limit[lane] = [str(tile_cou)]\n else:\n slice_cou = num_slice_NovaSeq if is_NovaSeq else num_slice_NovaSeq_S4\n tile_cou_per_slice = (tile_cou // slice_cou) + 1\n slice_id[lane] = []\n slice_first_tile[lane] = []\n slice_tile_limit[lane] = []\n for i in range(slice_cou):\n if (tile_cou_per_slice * i >= tile_cou):\n break\n slice_id[lane].append(str(i))\n slice_first_tile[lane].append(str(tile_nums[tile_cou_per_slice * i]))\n slice_tile_limit[lane].append(str(tile_cou_per_slice))\n \n folder_running = '{}/status/running.processbarcodes_lane_{}'.format(output_folder, lane)\n folder_finished = '{}/status/finished.processbarcodes_lane_{}'.format(output_folder, lane)\n folder_failed = '{}/status/failed.processbarcodes_lane_{}'.format(output_folder, lane)\n \n try:\n call(['mkdir', '-p', folder_running])\n \n now = datetime.now()\n dt_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n print(dt_string)\n \n # Extract Illumina barcodes\n commandStr = 'java -Djava.io.tmpdir='+tmpdir+' -XX:+UseParallelOldGC -XX:ParallelGCThreads=1 -XX:GCTimeLimit=50 -XX:GCHeapFreeLimit=10 -Xmx4000m '\n commandStr += '-jar '+picard_folder+'/picard.jar ExtractIlluminaBarcodes TMP_DIR='+tmpdir+' VALIDATION_STRINGENCY=SILENT '\n commandStr += 'BASECALLS_DIR='+basecalls_dir+' OUTPUT_DIR='+output_folder+'/'+lane+'/barcodes LANE='+lane+' '\n commandStr += 
'READ_STRUCTURE='+read_structure+' BARCODE_FILE='+output_folder+'/'+lane+'/barcode_params.txt '\n commandStr += 'METRICS_FILE='+output_folder+'/'+lane+'/'+flowcell_barcode+'.'+lane+'.barcode_metrics COMPRESS_OUTPUTS=true NUM_PROCESSORS=4'\n write_log(log_file, flowcell_barcode, \"ExtractIlluminaBarcodes for Lane \"+lane+\" Command=\"+commandStr)\n os.system(commandStr)\n write_log(log_file, flowcell_barcode, \"ExtractIlluminaBarcodes for Lane \"+lane+\" is done. \")\n \n # Convert Illumina base calls to sam (unmapped.bam)\n for i in range(len(slice_id[lane])):\n commandStr = 'java -Djava.io.tmpdir='+tmpdir+' -XX:+UseParallelOldGC -XX:ParallelGCThreads=1 -XX:GCTimeLimit=50 -XX:GCHeapFreeLimit=10 -Xmx10192m '\n commandStr += '-jar '+picard_folder+'/picard.jar IlluminaBasecallsToSam TMP_DIR='+tmpdir+' VALIDATION_STRINGENCY=SILENT '\n commandStr += 'BASECALLS_DIR='+basecalls_dir+' LANE='+lane+' RUN_BARCODE='+flowcell_barcode+' NUM_PROCESSORS=4 '\n commandStr += 'READ_STRUCTURE='+read_structure+' LIBRARY_PARAMS='+output_folder+'/'+lane+'/'+slice_id[lane][i]+'/library_params.txt INCLUDE_NON_PF_READS=false '\n commandStr += 'APPLY_EAMSS_FILTER=false MAX_READS_IN_RAM_PER_TILE=600000 ADAPTERS_TO_CHECK=null IGNORE_UNEXPECTED_BARCODES=true'\n commandStr += ' SEQUENCING_CENTER=BI BARCODES_DIR='+output_folder+'/'+lane+'/barcodes FIRST_TILE='+slice_first_tile[lane][i]+' TILE_LIMIT='+slice_tile_limit[lane][i]\n\n output_file = '{}/logs/run_barcodes2sam_lane_{}_{}.log'.format(output_folder, lane, slice_id[lane][i])\n submission_script = '{}/run_barcodes2sam.sh'.format(scripts_folder)\n call_args = ['qsub', '-o', output_file, '-l', 'h_vmem=100G', '-notify', '-l', 'h_rt=50:0:0', '-j', 'y', '-P', 'macosko_lab', '-l', 'os=RedHat7', submission_script, manifest_file, commandStr, lane, slice_id[lane][i], scripts_folder, output_folder, '{}/{}'.format(output_folder, lane)]\n call_to_taskrunner(output_folder, call_args)\n \n now = datetime.now()\n dt_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n print(dt_string)\n \n call(['mv', folder_running, folder_finished])\n except Exception as exp:\n print(\"EXCEPTION:!\")\n print(exp)\n traceback.print_tb(exp.__traceback__, file=sys.stdout)\n if os.path.isdir(folder_running):\n call(['mv', folder_running, folder_failed])\n else:\n call(['mkdir', '-p', folder_failed])\n \n if len(email_address) > 1:\n subject = \"Slide-seq workflow failed for \" + flowcell_barcode\n content = \"The Slide-seq workflow for lane \"+lane+\" failed at the step of processing barcodes. Please check the log file for the issues. 
\"\n call_args = ['python', '{}/send_email.py'.format(scripts_folder), email_address, subject, content]\n call(call_args)\n \n sys.exit()\n \n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"hiraksarkar/slideseq-processing","sub_path":"scripts/run_processbarcodes.py","file_name":"run_processbarcodes.py","file_ext":"py","file_size_in_byte":8800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14824823604","text":"import torch as t\nfrom torch import nn\nfrom model import UnetDoublePlus\nimport os\nimport json\nimport cv2\nfrom copy import deepcopy\nimport numpy as np\nimport shutil\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nconf = json.load(open(\"conf.json\", \"r\", encoding=\"utf-8\"))\npredict_conf = conf[\"predict\"]\ndata_dir = predict_conf[\"data_dir\"]\nimg_suffix = predict_conf[\"img_suffix\"]\nimg_size = predict_conf[\"img_size\"]\nuse_best_model = predict_conf[\"use_best_model\"]\nnum_classes = predict_conf[\"num_classes\"]\nbackbone_type = predict_conf[\"backbone_type\"]\nis_deconv = predict_conf[\"is_deconv\"]\ndownsample_use_pool = predict_conf[\"downsample_use_pool\"]\nis_deep_sup = predict_conf[\"is_deep_sup\"]\nsave_result = predict_conf[\"save_result\"]\nshow_result = predict_conf[\"show_result\"]\nsoftmax_op = nn.Softmax(dim=1)\n\n\ndef load_model():\n model = UnetDoublePlus(backbone_type=backbone_type, is_deconv=is_deconv, num_classes=num_classes, downsample_use_pool=downsample_use_pool, is_deep_sup=is_deep_sup)\n model = nn.DataParallel(module=model, device_ids=[0])\n if use_best_model:\n model.load_state_dict(t.load(\"best.pth\"))\n else:\n model.load_state_dict(t.load(\"epoch.pth\"))\n model = model.cuda(0)\n model.eval()\n return model\n\n\ndef load_one_image(img_pth):\n rgb_img = cv2.cvtColor(cv2.imread(img_pth), cv2.COLOR_BGR2RGB)\n original_h, original_w = rgb_img.shape[:2]\n original_rgb_img = deepcopy(rgb_img)\n img_tensor = t.tensor(np.transpose(cv2.resize(rgb_img, (img_size, img_size)) / 255, axes=[2, 0, 1])).unsqueeze(0).type(t.FloatTensor).cuda(0)\n return img_tensor, original_rgb_img, original_h, original_w\n\n\ndef predict_one_img(img_tensor, model, original_h, original_w):\n with t.no_grad():\n outputs = model(img_tensor)\n if is_deep_sup:\n result = (outputs[0] + outputs[1] + outputs[2] + outputs[3]) / 4\n else:\n result = outputs[3]\n predict_result = t.argmax(softmax_op(result), dim=1)[0].cpu().detach().numpy().astype(np.uint8)\n _, predict_mask = cv2.threshold(cv2.resize(predict_result * 255, (original_w, original_h), cv2.INTER_LINEAR), 0, 255, cv2.THRESH_BINARY)\n return predict_mask\n\n\ndef fusion(predict_mask, original_rgb_img):\n original_bgr_img = cv2.cvtColor(original_rgb_img, cv2.COLOR_BGR2RGB)\n original_bgr_img[:, :, 2][predict_mask == 255] = 255\n return original_bgr_img\n\n\ndef main():\n model = load_model()\n if save_result and os.path.exists(\"predict_result\"):\n shutil.rmtree(\"predict_result\")\n os.mkdir(\"predict_result\")\n for name in os.listdir(data_dir):\n img_pth = os.path.join(data_dir, name)\n img_tensor, original_rgb_img, original_h, original_w = load_one_image(img_pth)\n predict_mask = predict_one_img(img_tensor, model, original_h, original_w)\n fusion_result = fusion(predict_mask, original_rgb_img)\n if show_result:\n cv2.imshow(\"fusion\", cv2.resize(fusion_result, (600, 600)))\n cv2.waitKey()\n if save_result:\n cv2.imwrite(\"predict_result/%s\" % (name.replace(img_suffix, \"png\"),), fusion_result)\n\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"1991yuyang/unet-double-plus","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39044394624","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCS224N 2018-19: Homework 5\n\"\"\"\n\n### YOUR CODE HERE for part 1h\n\nimport torch\nimport torch.nn as nn\n\nclass Highway(nn.Module): \n \"\"\"\n Class that converts input words to their CNN-based embeddings.\n \"\"\"\n\n def __init__(self, embed_size, dropout_rate=0.2):\n \"\"\"\n \"\"\"\n ### the final word embedding size\n super(Highway, self).__init__()\n self.embed_size = embed_size\n self.dropout_rate = dropout_rate\n ### input output size are both embed_size\n self.ReLU_W_proj = nn.Sequential(\n nn.Linear(self.embed_size, self.embed_size, bias=True),\n nn.ReLU()\n )\n self.Sigmoid_W_gate = nn.Sequential(\n nn.Linear(self.embed_size, self.embed_size, bias=True),\n nn.Sigmoid()\n )\n self.Dropout = nn.Dropout(self.dropout_rate)\n\n def forward(self, input):\n \"\"\"\n params:\n input: a batch of words with shape (batch, embed_size)\n \"\"\"\n xproj = self.ReLU_W_proj(input)\n xgate = self.Sigmoid_W_gate(input)\n x_highway = xproj * xgate +(1 - xgate) * input\n \n return x_highway\n\n\n### END YOUR CODE \n\n","repo_name":"haroldmei/cs224n","sub_path":"nmt_cnn/highway.py","file_name":"highway.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30783302403","text":"from collections import defaultdict\n\nfrom odoo import models\n\nfrom odoo.addons.base_sparse_field.models.fields import Serialized\n\n\nclass ShopinvaderVariant(models.Model):\n _inherit = \"shopinvader.variant\"\n\n stock_data = Serialized(compute=\"_compute_stock_data\")\n\n def _get_stock_export_key(self):\n self.ensure_one()\n line = self.env[\"ir.exports.line\"].search(\n [\n (\"export_id\", \"=\", self.index_id.exporter_id.id),\n (\"name\", \"=\", \"stock_data\"),\n ]\n )\n if line.target:\n return line.target.split(\":\")[1]\n else:\n return line.name\n\n def _prepare_stock_data(self):\n stock_field = self.backend_id.product_stock_field_id.name\n return {\"qty\": self[stock_field]}\n\n def _compute_stock_data(self):\n result = defaultdict(dict)\n for backend in self.mapped(\"backend_id\"):\n loc_records = self.filtered(lambda s: s.backend_id == backend)\n for (\n wh_key,\n wh_ids,\n ) in backend._get_warehouse_list_for_export().items():\n for loc_record in loc_records.with_context(warehouse=wh_ids):\n result[loc_record.id][wh_key] = loc_record._prepare_stock_data()\n for record in self:\n record.stock_data = result[record.id]\n","repo_name":"shopinvader/odoo-shopinvader","sub_path":"shopinvader_product_stock/models/shopinvader_variant.py","file_name":"shopinvader_variant.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"} +{"seq_id":"12488200668","text":"from textwrap import fill\nimport discord\nfrom discord.ext import commands\nimport random\nimport requests\n\nfrom modules import MultiString\nfrom urllib.parse import quote\ninvis = 0x2F3136\n\n\nclass Meme(commands.Cog):\n \n def __init__(self, bot):\n self.bot = bot\n \n self.memes = \"all\"\n self.memes_data = None\n self.base_url = \"https://mime.rcp.r9n.co/memes\"\n\n def to_query_string(self, fields: dict) -> str:\n return 
\"&\".join(f\"{k}={quote(v)}\" for k, v in fields.items())\n\n def add_meme_commands(self):\n resp = requests.post(\"https://mime.rcp.r9n.co/multidocs\", json=self.memes)\n self.memes_data = {\n k: v for k, v in resp.json().items() if \"image\" not in v.values()\n }\n\n for meme_id, meme_fields in self.memes_data.items():\n\n @commands.command(\n name=meme_id,\n help=f\"{meme_fields}|||sends a meme\",\n )\n @commands.bot_has_permissions(embed_links=True)\n async def cmd(self, ctx, *, content: MultiString(n=5, fill_missing=True)):\n fields = self.to_query_string(\n {\n k: v\n for k, v in zip(\n self.memes_data[ctx.command.name].keys(),\n content[: len(self.memes_data[ctx.command.name])],\n )\n }\n )\n await ctx.embed(\n image_url=f\"{self.base_url}/{ctx.command.name}?{fields}\"\n )\n\n cmd.cog = self\n self.__cog_commands__ = self.__cog_commands__ + (cmd,)\n self.bot.add_command(cmd)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # @commands.command()\n # async def drake(self, ctx, *, content: MultiString(n=2, fill_missing=True)):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/drake?nah={quote(content[0])}&yeah={quote(content[1])}\")\n\n # @commands.command()\n # async def metaverse(self, ctx, *, content: MultiString(n=1)):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/metaverse?text={quote(content[0])}\")\n \n # @commands.command()\n # async def achievement(self, ctx, *, content: MultiString(n=1)):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/achievement?text={quote(content[0])}\")\n\n # @commands.command()\n # async def cantread(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/cantread?text={quote(content[0])}\")\n\n # @commands.command()\n # async def chaddoge(self, ctx, *, content: commands.clean_content):\n # stuff = content.split(',')\n # first = stuff[0]\n # second = stuff[1].strip() if len(stuff) == 2 else ''\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/chaddoge?chad={quote(content[0])}&virgin={quote(content[1])}\")\n\n # @commands.command()\n # async def changemymind(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/changemymind?text={quote(content[0])}\")\n\n # @commands.command()\n # async def doggo(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/doggo?text={quote(content)}\")\n\n # @commands.command()\n # async def doggo(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/doggo?text={quote(content)}\")\n\n # @commands.command()\n # async def handshake(self, ctx, *, content: commands.clean_content):\n # stuff = content.split(',')\n # first = stuff[0]\n # second = stuff[1].strip() if len(stuff) == 2 else ''\n # third = stuff[2].strip() if len(stuff) == 3 else ''\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/epichandshake?left={quote(first)}&right={quote(second)}&middle={quote(third)}\")\n \n # @commands.command()\n # async def pooh(self, ctx, *, content: commands.clean_content):\n # stuff = content.split(',')\n # first = stuff[0]\n # second = stuff[1].strip() if len(stuff) == 2 else ''\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/fancypooh?normal={quote(first)}&fancy={quote(second)}\")\n\n # @commands.command()\n # async def gus(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = 
f\"https://mime.rcp.r9n.co/memes/gus?text={quote(content)}\")\n\n # @commands.command()\n # async def holdup(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/holdup?text={quote(content)}\")\n\n # @commands.command()\n # async def lisa(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/lisa?text={quote(content)}\")\n\n # @commands.command()\n # async def naruto(self, ctx, *, content: commands.clean_content):\n # stuff = content.split(',')\n # first = stuff[0]\n # second = stuff[1].strip() if len(stuff) == 2 else ''\n # third = stuff[2].strip() if len(stuff) == 3 else ''\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/narutohandshake?left={quote(first)}&right={quote(second)}&bottom={quote(third)}\")\n \n \n\n # @commands.command()\n # async def pikachu(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/pikachu?text={quote(content)}\")\n\n # @commands.command()\n # async def truth(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/scrolloftruth?text={quote(content)}\")\n\n # @commands.command()\n # async def skinwalker(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/skinwalker?text={quote(content)}\")\n\n # @commands.command()\n # async def burn(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/spongeburn?text={quote(content)}\")\n\n # @commands.command()\n # async def boys(self, ctx, *, content: commands.clean_content):\n # await ctx.embed(image_url = f\"https://mime.rcp.r9n.co/memes/theboys?text={quote(content)}\")\n\n\n\n\n\ndef setup(bot):\n cog = Meme(bot)\n bot.add_cog(cog)\n cog.add_meme_commands()","repo_name":"adi-shan/discordbot","sub_path":"cogs/meme.py","file_name":"meme.py","file_ext":"py","file_size_in_byte":6466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12041149735","text":"import torch\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nfrom torchstat import stat\r\nimport matplotlib.pyplot as plt\r\nimport PIL.Image as Image\r\nfrom torch.utils.data import DataLoader\r\nfrom torch import nn, optim\r\nfrom torchvision.transforms import transforms\r\nfrom dataset import Train_Dataset, Validation_Dataset, Test_Dataset, All_Test_Dataset\r\nimport skimage.io as io\r\nimport shutil\r\nfrom my_loss import L1_norm, L2_norm, DiceLoss, BCEDiceLoss\r\n\r\nthreshold = 0.5 # 二分类阈值\r\n# 是否使用cuda\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nx_transforms = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.5], [0.5])\r\n])\r\n\r\n# mask只需要转换为tensor\r\ny_transforms = transforms.ToTensor()\r\n\r\n\r\ndef makedir(new_path):\r\n folder = os.path.exists(new_path)\r\n if not folder:\r\n os.makedirs(new_path)\r\n else:\r\n shutil.rmtree(new_path)\r\n os.makedirs(new_path)\r\n\r\n\r\ndef init_work_space(args):\r\n makedir('./' + args.model_name + '/results')\r\n makedir(args.ckpt)\r\n makedir('./' + args.model_name + '/runs')\r\n\r\n\r\ndef train_model(args, writer, model, criterion, optimizer, dataload, regular=''):\r\n save_epoch, best_val_acc = 0, -0.1\r\n for epoch in range(args.epoch):\r\n print('Epoch {}/{}'.format(epoch, args.epoch - 1))\r\n print('-' * 10)\r\n dt_size = 
len(dataload.dataset)\r\n epoch_loss = 0\r\n epoch_correct_pixels, epoch_total_pixels = [], []\r\n step = 0\r\n for x, y in dataload:\r\n step += 1\r\n inputs = x.to(device)\r\n labels = y.to(device)\r\n # zero the parameter gradients\r\n optimizer.zero_grad()\r\n # forward\r\n outputs = model(inputs).to(device)\r\n del inputs\r\n # print(outputs)\r\n # print(labels)\r\n loss = criterion(outputs, labels)\r\n if regular == 'L1' and epoch >= 100:\r\n loss = loss + 0.01 * L1_norm(model)\r\n elif regular == 'L2' and epoch >= 100:\r\n loss = loss + 0.01 * L2_norm(model)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # calculate accuracy\r\n predicted = outputs.detach().numpy()\r\n predicted[predicted >= threshold] = 1\r\n predicted[predicted < threshold] = 0\r\n correct = (predicted == labels.detach().numpy()).sum()\r\n del predicted\r\n pixel_num = 1.0\r\n for i in range(len(labels.size())):\r\n pixel_num *= labels.size()[i]\r\n\r\n # print(\"%d/%d,train loss:%0.3f\" % (step, (dt_size - 1) // dataload.batch_size + 1, loss.item()))\r\n\r\n epoch_correct_pixels.append(correct)\r\n epoch_total_pixels.append(pixel_num)\r\n epoch_loss += float(loss.item())\r\n del labels\r\n del loss\r\n val_accuracy = validation(args, model, method='train')\r\n epoch_loss = epoch_loss / step\r\n epoch_train_accuracy = np.mean(epoch_correct_pixels) / np.mean(epoch_total_pixels)\r\n print(\"epoch %d loss:%0.3f train accuracy:%0.3f val accuracy:%0.3f\" % (\r\n epoch, epoch_loss, epoch_train_accuracy, val_accuracy))\r\n writer.add_scalar('loss', epoch_loss / step, global_step=epoch)\r\n writer.add_scalar('train accuracy', epoch_train_accuracy, global_step=epoch)\r\n writer.add_scalar('validated accuracy', val_accuracy, global_step=epoch)\r\n writer.add_scalars('accuracy/group',\r\n {'train_accuracy': epoch_train_accuracy, 'validated accuracy': val_accuracy},\r\n global_step=epoch)\r\n if best_val_acc < val_accuracy:\r\n save_epoch = epoch\r\n torch.save(model, args.ckpt + '/' + args.model_name + '.pth')\r\n best_val_acc = val_accuracy\r\n print(\"Model:\", args.model_name)\r\n print(\"Dataset:\", args.data_file)\r\n print(\"Best epoch is\" + str(save_epoch))\r\n print(\"Best val acc is \" + str(best_val_acc))\r\n return model\r\n\r\n\r\n# 训练模型\r\ndef train(args, writer, model, loss='BCELoss', regular=''):\r\n model.to(device)\r\n criterion = nn.BCELoss() # nn.BCEWithLogitsLoss()\r\n if loss == 'DiceLoss':\r\n criterion = nn.BCELoss()\r\n elif loss == 'BCEDiceLoss':\r\n criterion = BCEDiceLoss()\r\n optimizer = optim.Adam(model.parameters(), )\r\n liver_dataset = Train_Dataset(args.data_file, transform=x_transforms, target_transform=y_transforms)\r\n # now, list of torch.Size([1, 512, 512]) [channel, img_x, img_y]\r\n dataloaders = DataLoader(liver_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)\r\n # now, list of torch.Size([2, 1, 512, 512]) [batch_size, channel, img_x, img_y]\r\n train_model(args, writer, model, criterion, optimizer, dataloaders, regular)\r\n\r\n\r\ndef validation(args, model, print_each=False, method='train'):\r\n liver_dataset = Validation_Dataset(args.data_file, transform=x_transforms, target_transform=y_transforms) #\r\n dataloaders = DataLoader(liver_dataset, batch_size=1)\r\n if method == 'train':\r\n dataloaders = DataLoader(liver_dataset, batch_size=8)\r\n model.eval()\r\n epoch_correct_pixels, epoch_total_pixels, OTSU_threshold = [], [], []\r\n with torch.no_grad():\r\n for x, y, x_path in dataloaders:\r\n inputs = x.to(device)\r\n labels = y.to(device)\r\n predicted = 
model(inputs).detach().numpy()\r\n predicted[predicted >= threshold] = 1\r\n predicted[predicted < threshold] = 0\r\n correct = (predicted == labels.detach().numpy()).sum()\r\n\r\n # predicted = model(inputs)\r\n # io.imsave(\"mid_delete.png\", torch.squeeze(predicted).detach().numpy())\r\n # predicted = cv2.imread(\"mid_delete.png\", cv2.IMREAD_GRAYSCALE)\r\n # mid_OTSU_threshold, predicted = cv2.threshold(predicted, 0, 255, cv2.THRESH_OTSU)\r\n # predicted = torch.from_numpy(predicted) / 255.0\r\n # OTSU_threshold.append(mid_OTSU_threshold)\r\n # if print_each:\r\n # print('OTSU_threshold', mid_OTSU_threshold)\r\n # correct = (predicted.numpy() == labels.detach().numpy()).sum()\r\n\r\n del predicted\r\n pixel_num = 1.0\r\n for i in range(len(labels.size())):\r\n pixel_num *= labels.size()[i]\r\n epoch_correct_pixels.append(correct)\r\n epoch_total_pixels.append(pixel_num)\r\n if print_each:\r\n print(x_path, 'acc', correct / pixel_num)\r\n return np.mean(epoch_correct_pixels) / np.mean(epoch_total_pixels)\r\n\r\n\r\ndef compare(args, model, manual=False, print_each=False, weight_path='', save_each=False, list_return=False):\r\n # 输出在指定数据集(args指定)中(包括train & val),使用一个模型进行预测的全部分割图.并对其和label的区别用颜色标记.\r\n # 绿色表示正确分割,红色表示label中是raft但prediction不是(failure prediction),黄色表示label不是raft但prediction是(failure alert).\r\n liver_dataset = Validation_Dataset(args.data_file, transform=x_transforms, target_transform=y_transforms)\r\n dataloaders = DataLoader(liver_dataset, batch_size=1)\r\n ans = []\r\n if manual:\r\n model = torch.load(weight_path, map_location='cpu')\r\n model.eval()\r\n epoch_correct_pixels, epoch_total_pixels, OTSU_threshold = [], [], []\r\n for x, y, x_path in dataloaders:\r\n inputs = x.to(device)\r\n labels = y.to(device)\r\n predicted = model(inputs).detach().numpy()\r\n if save_each:\r\n io.imsave('compare/' + x_path[0].split('\\\\')[-1].split('.')[0] + \"_gray_pre.png\",\r\n torch.squeeze(torch.tensor(predicted)).numpy())\r\n predicted[predicted >= threshold] = 1\r\n predicted[predicted < threshold] = 0\r\n correct = (predicted == labels.detach().numpy()).sum()\r\n if save_each:\r\n io.imsave('compare/' + x_path[0].split('\\\\')[-1].split('.')[0] + \"_pre.png\",\r\n torch.squeeze(torch.tensor(predicted)).numpy())\r\n io.imsave('compare/' + x_path[0].split('\\\\')[-1].split('.')[0] + \"_inputs.png\", torch.squeeze(x).numpy())\r\n io.imsave('compare/' + x_path[0].split('\\\\')[-1].split('.')[0] + \"_label.png\", torch.squeeze(y).numpy())\r\n\r\n # predicted = model(inputs)\r\n # io.imsave(\"mid_delete.png\", torch.squeeze(predicted).detach().numpy())\r\n # predicted = cv2.imread(\"mid_delete.png\", cv2.IMREAD_GRAYSCALE)\r\n # mid_OTSU_threshold, predicted = cv2.threshold(predicted, 0, 255, cv2.THRESH_OTSU)\r\n # predicted = torch.from_numpy(predicted) / 255.0\r\n # OTSU_threshold.append(mid_OTSU_threshold)\r\n # if print_each:\r\n # print('OTSU_threshold', mid_OTSU_threshold)\r\n # correct = (predicted.numpy() == labels.detach().numpy()).sum()\r\n\r\n del predicted\r\n pixel_num = 1.0\r\n for i in range(len(labels.size())):\r\n pixel_num *= labels.size()[i]\r\n epoch_correct_pixels.append(correct)\r\n epoch_total_pixels.append(pixel_num)\r\n if print_each:\r\n with open(\"compare.txt\", \"a\") as f:\r\n f.write(str(x_path) + 'acc' + str(correct / pixel_num) + '\\n') # 这句话自带文件关闭功能,不需要再写f.close()\r\n print(x_path, 'acc', correct / pixel_num)\r\n ans.append(correct / pixel_num)\r\n if list_return:\r\n return ans\r\n else:\r\n return np.mean(epoch_correct_pixels) / 
np.mean(epoch_total_pixels)\r\n\r\n\r\n# 显示模型的输出结果\r\ndef test(args, save_gray=False, manual=False, weight_path='', test_all_weights=False):\r\n model = None\r\n if not manual:\r\n model = torch.load(args.ckpt + '/' + args.model_name + '.pth', map_location='cpu')\r\n if manual:\r\n model = torch.load(weight_path, map_location='cpu') # use certain model weight.\r\n\r\n liver_dataset = Test_Dataset(args.data_file, transform=x_transforms, target_transform=y_transforms)\r\n if test_all_weights:\r\n liver_dataset = All_Test_Dataset(args.data_file, transform=x_transforms, target_transform=y_transforms)\r\n dataloaders = DataLoader(liver_dataset, batch_size=1)\r\n\r\n # print(\"val_accuracy\", validation(args, model, True)) # train 0.88340 val 0.856924\r\n\r\n model.eval()\r\n with torch.no_grad():\r\n OTSU_threshold = []\r\n for x, pic_name_i in dataloaders:\r\n # print(x)\r\n pic_name_i = pic_name_i[0]\r\n io.imsave(args.model_name + \"/results/\" + pic_name_i.split('.')[0] + \"_x.png\", torch.squeeze(x).numpy())\r\n predict = model(x)\r\n predict = torch.squeeze(predict).detach().numpy()\r\n\r\n # predict = model(x)\r\n # img_y = cv2.imread(args.model_name + \"/results/\" + pic_name_i.split('.')[0] + \"_gray_pre.png\",\r\n # cv2.IMREAD_GRAYSCALE)\r\n # mid_OTSU_threshold, img_y = cv2.threshold(predict, 0, 255, cv2.THRESH_OTSU)\r\n # print(mid_OTSU_threshold)\r\n # OTSU_threshold.append(mid_OTSU_threshold)\r\n # img_y[img_y >= mid_OTSU_threshold] = 1\r\n # img_y[img_y < mid_OTSU_threshold] = 0\r\n # io.imsave(args.model_name + \"/results/\" + pic_name_i.split('.')[0] + \"_label_pre.png\", img_y)\r\n if save_gray:\r\n io.imsave(args.model_name + \"/results/\" + pic_name_i.split('.')[0] + \"_gray_pre.png\", predict)\r\n\r\n predict[predict >= threshold] = 1\r\n predict[predict < threshold] = 0\r\n io.imsave(args.model_name + \"/results/\" + pic_name_i.split('.')[0] + \"_label_pre.png\", predict)\r\n\r\n\r\nclass SaveOutput:\r\n def __init__(self):\r\n self.outputs = []\r\n\r\n def __call__(self, module, module_in, module_out):\r\n self.outputs.append(module_out)\r\n\r\n def clear(self):\r\n self.outputs = []\r\n\r\n\r\ndef model_forward_visualization(image_path, weight_path, model_name=''):\r\n \"\"\"输入一张测试图像和训练好的模型权重,可视化每一步卷积的结果\"\"\"\r\n model = torch.load(weight_path, map_location='cpu') # load trained model\r\n\r\n save_output = SaveOutput() # register hooks for each layer\r\n hook_handles, k1, k2 = [], 0, 0\r\n for layer in model.modules():\r\n k1 += 1\r\n if isinstance(layer, torch.nn.modules.conv.Conv2d):\r\n k2 += 1\r\n handle = layer.register_forward_hook(save_output)\r\n hook_handles.append(handle)\r\n # print(model)\r\n # print('one layer name')\r\n # print(model.Up2.up)\r\n # print(model.Up2.up[0])\r\n # print('layer number', k1, k2)\r\n # print(len(save_output.outputs))\r\n\r\n x = x_transforms(Image.open(image_path).convert('L').resize(size=(512, 512))).unsqueeze(\r\n 0) # prepare the input image torch.Size([1, 1, 512, 512])\r\n print(x, x.dtype)\r\n y = model(x)\r\n\r\n # print(len(save_output.outputs))\r\n\r\n def module_output_to_numpy(tensor):\r\n return tensor.detach().to('cpu').numpy()\r\n\r\n # for x in save_output.outputs:\r\n # print(x.shape)\r\n for layer_idx in range(len(save_output.outputs)):\r\n images = module_output_to_numpy(save_output.outputs[layer_idx])\r\n # 这里的0代表读取output里第一个卷积层的输出\r\n\r\n print(type(images))\r\n print(images.shape)\r\n mid_1 = images.shape[1]\r\n mid_idx = 0\r\n while mid_idx < mid_1:\r\n # mid_idx is the index of feature\r\n with 
plt.style.context(\"seaborn-white\"):\r\n plt.figure(frameon=False)\r\n for idx in range(64):\r\n # idx is the index of subplot\r\n if mid_idx == mid_1:\r\n break\r\n plt.subplot(8, 8, idx + 1)\r\n plt.imshow(images[0, mid_idx])\r\n mid_idx += 1\r\n plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\r\n plt.savefig(\r\n './model_visualization/' + model_name + '/layer_' + str(layer_idx) + '_mid_' + str(mid_idx) + '.png')\r\n # plt.show()\r\n plt.cla()\r\n plt.close('all')\r\n\r\n\r\ndef model_parameter_visualization(weight_path):\r\n \"\"\"使用一个全为1的torch,输入训练好的模型权重,可视化每一步卷积的结果\"\"\"\r\n model = torch.load(weight_path, map_location='cpu') # load trained model\r\n\r\n save_output = SaveOutput() # register hooks for each layer\r\n hook_handles, k1, k2 = [], 0, 0\r\n for layer in model.modules():\r\n k1 += 1\r\n if isinstance(layer, torch.nn.modules.conv.Conv2d):\r\n k2 += 1\r\n handle = layer.register_forward_hook(save_output)\r\n hook_handles.append(handle)\r\n\r\n x = torch.full([1, 1, 512, 512], 1, dtype=torch.float32) # prepare the input image\r\n print(x)\r\n y = model(x)\r\n\r\n # print(len(save_output.outputs))\r\n\r\n def module_output_to_numpy(tensor):\r\n return tensor.detach().to('cpu').numpy()\r\n\r\n # for x in save_output.outputs:\r\n # print(x.shape)\r\n for layer_idx in range(len(save_output.outputs)):\r\n images = module_output_to_numpy(save_output.outputs[layer_idx])\r\n # 这里的0代表读取output里第一个卷积层的输出\r\n\r\n print(type(images))\r\n print(images.shape)\r\n mid_1 = images.shape[1]\r\n mid_idx = 0\r\n while mid_idx < mid_1:\r\n # mid_idx is the index of feature\r\n with plt.style.context(\"seaborn-white\"):\r\n plt.figure(frameon=False)\r\n for idx in range(64):\r\n # idx is the index of subplot\r\n if mid_idx == mid_1:\r\n break\r\n plt.subplot(8, 8, idx + 1)\r\n plt.imshow(images[0, mid_idx])\r\n mid_idx += 1\r\n plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\r\n plt.savefig('./model_visualization/layer_' + str(layer_idx) + '_mid_' + str(mid_idx) + '.png')\r\n # plt.show()\r\n plt.cla()\r\n plt.close('all')\r\n\r\n\r\n# model_forward_visualization(image_path=\"./data/train/image/8.png\",\r\n# weight_path=\"./Attention_Unet_500epoch/weights/Attention_Unet_500epoch_weights_90.pth\")\r\n# model_parameter_visualization(weight_path=\"./Attention_Unet_500epoch/weights/Attention_Unet_500epoch_weights_90.pth\")\r\n\r\n\r\ndef model_print(model, input_shape=(1, 512, 512)):\r\n # 1. print parameter number\r\n # 2. 
draw model structure\r\n\r\n # from torchstat import stat\r\n # stat(model, input_shape)\r\n\r\n print(sum(p.numel() for p in model.parameters()))\r\n\r\n\r\ndef all_test(data_file='./data', model_path='./weights/', save_gray=True):\r\n # 使用指定的数据集,测试存储在weights文件夹中的每一个模型在全部图像上的分割情况。test图像可以没有对应的label\r\n model_files = os.listdir(model_path)\r\n for i in range(len(model_files)):\r\n # if model_files[i] != 'd1_NestedUNet_250epoch.pth' and model_files[i] != 'd1_my_model4_250epoch.pth':\r\n # continue\r\n liver_dataset = All_Test_Dataset(data_file, transform=x_transforms, target_transform=y_transforms)\r\n dataloaders = DataLoader(liver_dataset, batch_size=1)\r\n\r\n weight_path = model_path + model_files[i]\r\n model_name = model_files[i].split('.')[0]\r\n print(weight_path, model_name)\r\n makedir(\"all_test_results/\" + model_name)\r\n model = torch.load(weight_path, map_location='cpu')\r\n model.eval()\r\n with torch.no_grad():\r\n for x, pic_name_i in dataloaders:\r\n pic_name_i = pic_name_i[0]\r\n io.imsave(\"all_test_results/\" + model_name + \"/\" + pic_name_i.split('.')[0] + \"_x.png\",\r\n torch.squeeze(x).numpy())\r\n predict = model(x)\r\n predict = torch.squeeze(predict).detach().numpy()\r\n if save_gray:\r\n io.imsave(\"all_test_results/\" + model_name + \"/\" + pic_name_i.split('.')[0] + \"_gray_pre.png\",\r\n predict)\r\n predict[predict >= threshold] = 1\r\n predict[predict < threshold] = 0\r\n io.imsave(\"all_test_results/\" + model_name + \"/\" + pic_name_i.split('.')[0] + \"_label_pre.png\", predict)\r\n\r\n# all_test()\r\n# model_forward_visualization(image_path=\"./d1/val/image/140.png\", weight_path=\"./weights/d1_my_model1_250epoch.pth\",\r\n# model_name='d1_my_model1_250epoch_2')\r\n# model_forward_visualization(image_path=\"./d1/val/image/140.png\",\r\n# weight_path=\"./weights/Attention_Unet_500epoch_weights_90.pth\", model_name='Attention_Unet')\r\n","repo_name":"fjc1575/Marine-Aquaculture","sub_path":"MDOAU-net/wjc_core.py","file_name":"wjc_core.py","file_ext":"py","file_size_in_byte":18708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25387522694","text":"# coding=utf-8\n'''\n丰田和雷克萨斯通过EPC颜色代码查找符合条件的VIN\n'''\n\nimport pymysql\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\ndef be_really(vins, vin_head, year):\n vin_end = vins.split(\"-\")[1]\n v = get_right_vin(vin_head, vin_end[0], int(vin_end[1:]), year)\n return v\n\ndef get_right_vin(vin8, vin2, vin_end, year):\n \"\"\"\n 根据权重校验获取有效的vin(该vin并不一定存在)\n :param vin8: vin的前8位\n :param vin2: vin的10,11位\n :param vin_end: vin的后6位\n :return: 有效的vin\n \"\"\"\n # vin中字母对应的值\n dict_alp = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8, 'J': 1, 'K': 2, 'L': 3, 'M': 4,\n 'N': 5, 'P': 7, 'R': 9, 'S': 2, 'T': 3, 'U': 4, 'V': 5, 'W': 6, 'X': 7, 'Y': 8, 'Z': 9,\n '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '*': 0}\n # vin中数字对应的加权系数值,第9位是权重位\n dict_index = {1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3, 7: 2, 8: 10, 9: 0,\n 10: 9, 11: 8, 12: 7, 13: 6, 14: 5, 15: 4, 16: 3, 17: 2}\n # 年份表\n dict_year = {\n 2020: 'L', 2019: 'K', 2018: 'J', 2017: 'H', 2016: 'G', 2015: 'F', 2014: 'E', 2013: 'D', 2012: 'C',\n 2011: 'B', 2010: 'A', 2009: '9', 2008: '8', 2007: '7', 2006: '6', 2005: '5', 2004: '4', 2003: '3'\n }\n vin2 = dict_year[int(year)] + vin2\n # 前八位权重值\n q1 = 0\n for i in range(1, len(vin8) + 1):\n q1 += (dict_alp[vin8[i - 1]] * dict_index[i])\n\n # 第9位暂时补‘*’,待算出v9后替换掉‘*’\n vin = vin8 + \"*\" + vin2 + \"%06d\" % 
vin_end\n # 权重值\n q2 = 0\n for i in range(10, len(vin) + 1):\n q2 += (dict_alp[vin[i - 1]] * dict_index[i])\n # 确认vin的第9位的值\n v9 = \"X\" if (q1 + q2) % 11 == 10 else str((q1 + q2) % 11)\n vin = vin.replace(\"*\", v9)\n return vin\n\ndef select_vin_by_color(color_code):\n conn = pymysql.connect(host='192.168.3.110', user='jing', passwd='123456', db='toyota_201910', charset='utf8')\n cur = conn.cursor()\n cur.execute(\"USE toyota_201910\")\n sql = '''\n SELECT T1.*,`feature_vin`.vin_head\n FROM\n `feature_vin`,\n (SELECT head_ofs,frame_num,LEFT(prod_date,4),color\n FROM `feature_frame_less_data_for_carcolor`\n WHERE color = '{}'\n AND LEFT(prod_date,4) > 2013\n GROUP BY head_ofs) AS T1\n WHERE \n T1.head_ofs = `feature_vin`.head_ofs \n AND LEFT(`feature_vin`.vin_head,3) IN (\n 'LFM', 'LVG', 'JTE', 'JTM', 'LTV', 'JTN', '5TD', '5TF', 'JF1', 'LCU', 'LFB', 'JTF', 'MHF', 'JTK', 'MR1', 'LFP', \n '4T3', 'JTG', 'JTD', 'JTB', 'LTU', 'JT5', 'LF4', 'LAG', '2FM', 'JZS', 'L25', 'GGH', 'LVR', 'JT1', 'JLE', '2TE', \n 'GTJ', 'LPM', 'LGB', 'WBA', '5FM', 'LF0'\n )\n ORDER BY FIELD(LEFT(`feature_vin`.vin_head,3),'LFM', 'LVG', 'JTE', 'JTM', 'LTV', 'JTN', '5TD', '5TF', 'JF1', 'LCU', 'LFB', 'JTF', 'MHF', 'JTK', 'MR1', 'LFP', '4T3', 'JTG', 'JTD', 'JTB', 'LTU', 'JT5', 'LF4', 'LAG', '2FM', 'JZS', 'L25', 'GGH', 'LVR', 'JT1', 'JLE', '2TE', 'GTJ', 'LPM', 'LGB', 'WBA', '5FM', 'LF0')\n LIMIT 10;'''.format(color_code)\n cur.execute(sql)\n result = cur.fetchall()\n cur.close()\n conn.close()\n return result\n\ndef get_color_from_sjb(vin):\n '''数据宝刷数据'''\n # 设置请求头\n headers = {\n 'Cookie': 'PHPSESSID=254mq8cqla25sn6hu9pcbh6hfc',\n 'Host': 'vin-tools',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.6 Safari/537.36',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n }\n url = 'http://vin-tools/index.php/Home/CatarcTools/QueryEngineCode?vin={}&full_data=1&tdsourcetag=s_pctim_aiomsg'.format(vin)\n r = requests.get(url, headers=headers, timeout=6)\n if r.status_code == 200:\n bsObj = BeautifulSoup(r.text, 'lxml')\n body = bsObj.find('pre')\n content = body.get_text()\n model_id = re.compile('车型ID:[0-9]{0,10}车型名称').search(content)\n color_name = re.compile('颜色:[\\S|\\s]{0,15}生产时间').search(content)\n if model_id:\n model_id = model_id.group().replace('车型ID:', '').replace('车型名称', '')\n else:\n model_id = ''\n if color_name:\n color_name = color_name.group().replace('颜色:', '').replace('生产时间', '')\n else:\n color_name = ''\n return [model_id, color_name]\n else:\n return None\n\ncolor_list = ['1H4']\nresult_list = []\n# 依据color_code查前几条VIN信息\nfor color in color_list:\n print('正在查找符合条件的车架号信息...', color)\n vins = select_vin_by_color(color)\n # 依据VIN信息推算真实VIN\n for i in vins:\n frame_num = i[1]\n vin_head = i[4][:8]\n year = i[2]\n vin = be_really(frame_num, vin_head, year)\n # 依据真实VIN刷数据宝颜色数据\n model_id_AND_color = get_color_from_sjb(vin)\n if model_id_AND_color:\n result_list.append([vin, color] + model_id_AND_color)\n if model_id_AND_color[1] != '':\n break\n print('该color_code结束', color)\nprint(result_list)\n\n","repo_name":"JingTen/Python_code_company","sub_path":"00000000000/toyota_color_find_vin.py","file_name":"toyota_color_find_vin.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30302378658","text":"import unittest\n\nfrom stacks 
import QueueStack\nfrom stacks import DoubleListStack, StackWithMax\n\n\nclass TestStacks(unittest.TestCase):\n    def test_stack_with_max(self):\n        s = StackWithMax()\n        s.push(3)\n        s.push(1)\n        max = s.max()\n        self.assertEqual(max, 3)\n        s.push(4)\n        max = s.max()\n        self.assertEqual(max, 4)\n        el = s.pop()\n        self.assertEqual(el, 4)\n        max = s.max()\n        self.assertEqual(max, 3)\n\n    def test_queue_stack(self):\n        s = QueueStack()\n        s.enqueue(1)\n        s.enqueue(2)\n        el = s.dequeue()\n        self.assertEqual(1, el)\n\n    def test_queue_stack_raises_exception_on_empty(self):\n        s = QueueStack()\n        self.assertRaises(Exception, s.dequeue)\n\n    def test_two_list_stacks(self):\n        s = DoubleListStack(4)\n        s.pushLeft(10)\n        s.pushRight(20)\n        r1 = s.popLeft()\n        r2 = s.popRight()\n        self.assertEqual(10, r1)\n        self.assertEqual(20, r2)\n\n    def test_two_list_stacks_raises_exception_on_full(self):\n        s = DoubleListStack(2)\n        s.pushLeft(10)\n        self.assertRaises(Exception, s.pushRight)\n\n    def test_two_list_stacks_raises_exception_on_empty(self):\n        s = DoubleListStack(2)\n        self.assertRaises(Exception, s.popRight)\n","repo_name":"robrogers3/python-tests","sub_path":"tests/test_stacks.py","file_name":"test_stacks.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29089807439","text":"from PyQt5 import QtGui\nfrom PyQt5.Qt import *\nimport sys\n\n\nclass SnakeGame(QMainWindow):\n    def __init__(self):\n        super(SnakeGame, self).__init__()\n        self.initUI()\n        # add the game board canvas\n        self.game_board = GameBoard()\n        self.setCentralWidget(self.game_board)\n        # initialize the game\n\n    def initUI(self):\n        self.setGeometry(1500, 500, 500, 500)\n        self.setWindowTitle('Snake Game')\n        self.show()\n\n\nclass GameBoard(QWidget):\n    def paintEvent(self, a0: QtGui.QPaintEvent) -> None:\n        qp = QPainter(self)\n        qp.fillRect(0, 0, self.width(), self.height(), Qt.white)\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    game = SnakeGame()\n    sys.exit(app.exec())\n","repo_name":"sunnkey/pro_PyQt5","sub_path":"Projects/Snake Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2626093250","text":"\n# define monster class\n\nclass Monster:\n    def __init__(self, name, primType, subType, hp, level, movesRoster, hmComp, tmComp):\n        self.name = name\n        self.primType = primType\n        self.subType = subType\n        self.hp = hp\n        self.level = level\n        self.movesRoster = movesRoster\n        # the following arrays define which moves a monster can learn\n        self.hmComp = hmComp\n        self.tmComp = tmComp\n\n# define trainer class\n\n\nclass Trainer:\n    def __init__(self, name, pkRoster):\n        self.name = name\n        self.pkRoster = pkRoster\n\n    def introduce(self):\n        print(\n            f'My name is {self.name} and my starting Pokemon is {self.pkRoster}.')\n\n# define area graph object\n\n\nclass Area:\n    def __init__(self, name, description, goTo, comeFrom, pcIsHere):\n        self.name = name\n        self.description = description\n        self.goTo = goTo\n        self.comeFrom = comeFrom\n        self.pcIsHere = pcIsHere\n","repo_name":"theeelgonzo/pktb","sub_path":"obs.py","file_name":"obs.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71345739786","text":"from yacs.config import CfgNode as CN\nimport os\n\n__C = CN()\n\n__C.dataset = CN()\n__C.dataset.dataset_pth = ''\n__C.dataset.num_landmarks = 19\n__C.dataset.eval_radius = []\n\n__C.dataset.AUGMENTATION = CN(new_allowed=True)\n__C.dataset.AUGMENTATION.REVERSE_AXIS = False\n__C.dataset.AUGMENTATION.FLIP = False\n__C.dataset.AUGMENTATION.FLIP_PAIRS = []\n__C.dataset.AUGMENTATION.ROTATION_FACTOR = 3\n__C.dataset.AUGMENTATION.INTENSITY_FACTOR = 0.5\n__C.dataset.AUGMENTATION.SF = 0.05\n__C.dataset.AUGMENTATION.TRANSLATION_X = 10\n__C.dataset.AUGMENTATION.TRANSLATION_Y = 10\n__C.dataset.AUGMENTATION.ELASTIC_STRENGTH = 500\n__C.dataset.AUGMENTATION.ELASTIC_SMOOTHNESS = 30\n\n__C.train = CN()\n__C.train.learning_rate = 0.001\n__C.train.batch_size = 8\n__C.train.decay_step = 50\n__C.train.decay_gamma = 0.5\n__C.train.num_epochs = 300\n__C.train.num_workers = 8\n__C.train.save_seq = 10\n__C.train.loss_lambda = 0.001\n__C.train.input_size = []\n__C.test = CN()\n__C.test.id_oneshot = []\n\n__C.args = CN()\n\ndef get_cfg_defaults():\n\treturn __C.clone()\n\ndef merge_cfg_datasets(cfg, dataset='head'):\n\tassert dataset in ['head', 'hand', 'leg', 'chest']\n\tcurrent_file_path = os.path.abspath(__file__)\n\tcurrent_directory = os.path.dirname(current_file_path)\n\tdataset_yaml = os.path.join(current_directory, f'{dataset}.yaml')\n\tcfg.merge_from_file(dataset_yaml)\n\tcfg.dataset.dataset = dataset\n\ndef merge_cfg_train(cfg, train='voting'):\n\tassert hasattr(cfg.dataset, 'dataset')\n\tassert train in ['SSL', 'voting', 'heatmap', 'TPL', 'SAM', 'ERE']\n\tcurrent_file_path = os.path.abspath(__file__)\n\tcurrent_directory = os.path.dirname(current_file_path)\n\tdataset_yaml = os.path.join(current_directory, f'{train}_{cfg.dataset.dataset}.yaml')\n\tcfg.merge_from_file(dataset_yaml)\n\ndef merge_from_args(cfg, args, key_list=None):\n\targs_dict = vars(args)\n\tif key_list is None:\n\t\tfor key, value in args_dict.items():\n\t\t\tcfg.args[key] = value\n\telse:\n\t\tfor key in key_list:\n\t\t\tcfg.args[key] = args_dict[key]","repo_name":"MIRACLE-Center/Oneshot_landmark_detection","sub_path":"code/cc2d-final/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"81"} +{"seq_id":"69926039305","text":"FLOOR = '.'\nSEAT_EMPTY = 'L'\nSEAT_OCCUPIED = '#'\n\nUP = -1\nDOWN = +1\nRIGHT = +1\nLEFT = -1\n\ndef parse_input(filename):\n    with open(filename) as f:\n        return [list(line.strip()) for line in f.readlines()]\n\ndef print_grid(grid):\n    for line in grid:\n        print(\"\".join(line))\n    print()\n\ndef adjacent(row, col, grid):\n    ROW_MAX = len(grid) \n    COL_MAX = len(grid[0])\n\n    def next(row, col, dx=0, dy=0):\n        x = row + dx\n        y = col + dy\n\n        seat = grid[x][y] if (0 <= x < ROW_MAX) and (0 <= y < COL_MAX) else None\n        \n        if seat == FLOOR:\n            seat = next(row + dx, col + dy, dx, dy)\n        \n        return seat\n\n    right = lambda row, col: next(row, col, dy=RIGHT)\n    left = lambda row, col: next(row, col, dy=LEFT)\n    up = lambda row, col: next(row, col, dx=UP)\n    down = lambda row, col: next(row, col, dx=DOWN)\n    up_left = lambda row, col: next(row, col, dx=UP, dy=LEFT)\n    down_left = lambda row, col: next(row, col, dx=DOWN, dy=LEFT)\n    up_right = lambda row, col: next(row, col, dx=UP, dy=RIGHT)\n    down_right = lambda row, col: next(row, col, dx=DOWN, dy=RIGHT)\n\n    seats = [fun(row, col) for fun in [up, down, left, right, up_left, up_right, down_left, down_right]]\n\n    return list(filter(lambda s: s is not None and s != FLOOR, seats))\n\ndef all_empty(adjacent):\n    return all(map(lambda x: x == SEAT_EMPTY, adjacent))\n\ndef at_least_five_occupied(adjacent):\n    return len(list(filter(lambda x: x == SEAT_OCCUPIED, adjacent))) >= 
5\n\ndef model_round(grid):\n new_grid = []\n\n for r, row in enumerate(grid):\n new_row = []\n for c, seat in enumerate(row):\n if seat == FLOOR:\n new_row.append(FLOOR)\n else:\n a = adjacent(r, c, grid)\n if seat == SEAT_EMPTY and all_empty(a):\n new_row.append(SEAT_OCCUPIED)\n elif seat == SEAT_OCCUPIED and at_least_five_occupied(a):\n new_row.append(SEAT_EMPTY)\n else:\n new_row.append(seat)\n\n new_grid.append(new_row)\n new_row = []\n\n return new_grid\n\ndef count_occupied(grid):\n return sum([sum([True for seat in row if seat == SEAT_OCCUPIED]) for row in grid])\n\ndef run_model(grid):\n next_grid = model_round(grid)\n\n if next_grid == grid:\n return grid\n \n return run_model(next_grid)\n\n\ndef main():\n grid = parse_input(\"input.txt\")\n \n stable_grid = run_model(grid)\n \n return count_occupied(stable_grid)\n\nprint(main())","repo_name":"aurlien/AdventOfCode2020","sub_path":"d11/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10333863058","text":"import os\nimport cPickle as pickle\nimport re\n\nimport gobject\nimport gtk\n\nimport blaplay\nui_manager = blaplay.bla.ui_manager\nfrom blaplay.blacore import blaconst, blacfg\nfrom blaplay import blagui\nfrom blaplay.formats._identifiers import *\nfrom blawindows import BlaScrolledWindow\nfrom blatracklist import (\n COLUMN_ARTIST, COLUMN_ALBUM, COLUMN_ALBUM_ARTIST, COLUMN_GENRE, popup,\n update_columns, parse_track_list_stats, BlaTreeView, BlaTrackListItem)\nfrom blastatusbar import BlaStatusbar\nfrom blaview import BlaViewMeta\nfrom blaplaylist import playlist_manager\n\n\nclass BlaQueue(BlaScrolledWindow):\n __metaclass__ = BlaViewMeta(\"Queue\")\n\n __layout = (\n gobject.TYPE_PYOBJECT, # An instance of BlaTrackListItem\n gobject.TYPE_STRING # Position in the queue\n )\n\n def __init__(self):\n super(BlaQueue, self).__init__()\n\n self.__size = 0\n self.__length = 0\n self.clipboard = []\n\n self.__treeview = BlaTreeView(view_id=blaconst.VIEW_QUEUE)\n self.__treeview.set_model(gtk.ListStore(*self.__layout))\n self.__treeview.set_enable_search(False)\n self.__treeview.set_property(\"rules_hint\", True)\n\n self.set_shadow_type(gtk.SHADOW_IN)\n self.add(self.__treeview)\n\n self.__treeview.enable_model_drag_dest(\n [(\"queue\", gtk.TARGET_SAME_WIDGET, 3)], gtk.gdk.ACTION_COPY)\n self.__treeview.enable_model_drag_source(\n gtk.gdk.BUTTON1_MASK,\n [(\"queue\", gtk.TARGET_SAME_WIDGET, 3)],\n gtk.gdk.ACTION_COPY)\n\n self.__treeview.connect(\"popup\", popup, blaconst.VIEW_QUEUE, self)\n self.__treeview.connect(\"row_activated\", self.play_item)\n self.__treeview.connect(\n \"button_press_event\", self.__button_press_event)\n self.__treeview.connect(\"key_press_event\", self.__key_press_event)\n self.__treeview.connect(\"drag_data_get\", self.__drag_data_get)\n self.__treeview.connect(\"drag_data_received\", self.__drag_data_recv)\n\n update_columns(self.__treeview, view_id=blaconst.VIEW_QUEUE)\n self.show_all()\n\n def __button_press_event(self, treeview, event):\n if (event.button == 2 and\n event.type not in [gtk.gdk._2BUTTON_PRESS,\n gtk.gdk._3BUTTON_PRESS]):\n self.paste()\n return True\n\n def __key_press_event(self, treeview, event):\n if blagui.is_accel(event, \"X\"):\n self.cut()\n elif blagui.is_accel(event, \"C\"):\n self.copy()\n elif blagui.is_accel(event, \"V\"):\n self.paste()\n elif blagui.is_accel(event, \"Delete\"):\n self.remove()\n return False\n\n def __drag_data_get(self, 
treeview, drag_context, selection_data, info,\n time):\n data = pickle.dumps(treeview.get_selection().get_selected_rows()[-1],\n pickle.HIGHEST_PROTOCOL)\n selection_data.set(\"\", 8, data)\n\n def __drag_data_recv(self, treeview, drag_context, x, y, selection_data,\n info, time):\n drop_info = treeview.get_dest_row_at_pos(x, y)\n model = self.__treeview.get_model()\n paths = pickle.loads(selection_data.data)\n\n # TODO: factor this out so we can use the same for the playlist\n if drop_info:\n path, pos = drop_info\n iterator = model.get_iter(path)\n\n if (pos == gtk.TREE_VIEW_DROP_BEFORE or\n pos == gtk.TREE_VIEW_DROP_INTO_OR_BEFORE):\n move_before = model.move_before\n def move_func(it):\n move_before(it, iterator)\n else:\n move_after = model.move_after\n def move_func(it):\n move_after(it, iterator)\n paths.reverse()\n else:\n iterator = None\n move_before = model.move_before\n def move_func(it):\n move_before(it, iterator)\n\n get_iter = model.get_iter\n iterators = map(get_iter, paths)\n map(move_func, iterators)\n self.update_queue_positions()\n\n def __add_items(self, items, path=None, select_rows=False):\n treeview = self.__treeview\n model = treeview.get_model()\n iterator = None\n\n try:\n if (not treeview.get_selection().get_selected_rows()[-1] or\n path == -1):\n raise TypeError\n if not path:\n path, column = treeview.get_cursor()\n except TypeError:\n path = (len(model),)\n append = model.append\n def insert_func(iterator, item):\n append(item)\n else:\n iterator = model.get_iter(path)\n insert_func = model.insert_before\n items.reverse()\n\n for item in items:\n iterator = insert_func(iterator, [item, None])\n\n if select_rows:\n treeview.freeze_notify()\n selection = treeview.get_selection()\n selection.unselect_all()\n select_path = selection.select_path\n map(select_path, xrange(path[0], path[0] + len(items)))\n treeview.thaw_notify()\n\n self.update_queue_positions()\n\n def __get_items(self, remove=True):\n treeview = self.__treeview\n model, selections = treeview.get_selection().get_selected_rows()\n if selections:\n get_iter = model.get_iter\n iterators = map(get_iter, selections)\n items = [model[iterator][0] for iterator in iterators]\n if remove:\n remove = model.remove\n map(remove, iterators)\n self.update_queue_positions()\n return items\n return []\n\n def play_item(self, treeview, path, column=None):\n model = treeview.get_model()\n iterator = model.get_iter(path)\n model[iterator][0].play()\n if blacfg.getboolean(\"general\", \"queue.remove.when.activated\"):\n model.remove(iterator)\n self.update_queue_positions()\n\n def update_statusbar(self):\n model = self.__treeview.get_model()\n count = len(model)\n if count == 0:\n info = \"\"\n else:\n info = parse_track_list_stats(count, self.__size, self.__length)\n BlaStatusbar.set_view_info(blaconst.VIEW_QUEUE, info)\n\n def select(self, type_):\n treeview = self.__treeview\n selection = treeview.get_selection()\n model, selected_paths = selection.get_selected_rows()\n\n if type_ == blaconst.SELECT_ALL:\n selection.select_all()\n return\n elif type_ == blaconst.SELECT_COMPLEMENT:\n selected_paths = set(selected_paths)\n paths = set([(p,) for p in xrange(len(model))])\n paths.difference_update(selected_paths)\n selection.unselect_all()\n select_path = selection.select_path\n map(select_path, paths)\n return\n elif type_ == blaconst.SELECT_BY_ARTISTS:\n column_id = COLUMN_ARTIST\n elif type_ == blaconst.SELECT_BY_ALBUMS:\n column_id = COLUMN_ALBUM\n elif type_ == blaconst.SELECT_BY_ALBUM_ARTISTS:\n column_id = 
COLUMN_ALBUM_ARTIST\n else:\n column_id = COLUMN_GENRE\n\n items = [model[path][0] for path in selected_paths]\n eval_ = BlaEval(column_id).eval\n values = set()\n for item in items:\n values.add(eval_(item.track).lower())\n if not values:\n return\n r = re.compile(\n r\"^(%s)$\" % \"|\".join(values), re.UNICODE | re.IGNORECASE)\n items = [row[0] for row in model if r.match(eval_(row[0].track))]\n paths = [row.path for row in model if row[0] in items]\n selection.unselect_all()\n select_path = selection.select_path\n map(select_path, paths)\n\n def update_queue_positions(self):\n model = self.__treeview.get_model()\n\n # Update the position labels for our own treeview.\n for idx, row in enumerate(model):\n model[row.path][1] = idx+1\n\n # Invalidate the visible rows of the current playlists so the\n # position labels also get updated in playlists.\n playlist = playlist_manager.get_current_playlist()\n playlist.invalidate_visible_rows()\n\n # Calculate size and length of the queue and update the statusbar.\n size = length = 0\n for row in model:\n track = row[0].track\n size += track[FILESIZE]\n length += track[LENGTH]\n self.__size, self.__length = size, length\n self.emit(\"count_changed\", blaconst.VIEW_QUEUE, self.n_items)\n self.update_statusbar()\n\n def get_queue_positions(self, item):\n model = self.__treeview.get_model()\n return [row[1] for row in model if row[0] == item]\n\n def queue_items(self, items):\n if not items:\n return\n\n # If any of the items is not an instance of BlaTrackListItem it means\n # all of the items are actually just URIs which stem from the library\n # browser and are not part of a playlist.\n if not isinstance(items[0], BlaTrackListItem):\n items = map(BlaTrackListItem, items)\n\n count = blaconst.QUEUE_MAX_ITEMS - self.n_items\n self.__add_items(items[:count], path=-1)\n\n def remove_items(self, items):\n # This is invoked by playlists who want to remove tracks from the\n # queue.\n model = self.__treeview.get_model()\n for row in model:\n if row[0] in items:\n model.remove(row.iter)\n self.update_queue_positions()\n\n def get_queue(self):\n queue = []\n playlists = playlist_manager.get_playlists()\n\n for row in self.__treeview.get_model():\n item = row[0]\n playlist = item.playlist\n\n try:\n playlist_idx = playlists.index(playlist)\n except ValueError:\n item = (item.uri,)\n else:\n item = (playlist_idx,\n playlist.get_path_from_item(item, all_=True))\n\n queue.append(item)\n\n return queue\n\n def restore(self, items):\n print_i(\"Restoring the play queue\")\n\n if not items:\n return\n\n playlists = playlist_manager.get_playlists()\n\n for idx, item in enumerate(items):\n try:\n playlist_idx, path = item\n except ValueError:\n # Library tracks that are not part of a playlist.\n item = BlaTrackListItem(item)\n else:\n item = playlists[playlist_idx].get_item_from_path(path)\n items[idx] = item\n\n self.queue_items(items)\n\n def cut(self, *args):\n self.clipboard = self.__get_items(remove=True)\n ui_manager.update_menu(blaconst.VIEW_QUEUE)\n\n def copy(self, *args):\n # We specifically don't create actual copies of items here as it's not\n # desired to have unique ones in the queue. 
Copied and pasted tracks\n # should still refer to the same BlaTrackListItem instances which are\n # possibly part of a playlist.\n self.clipboard = self.__get_items(remove=False)\n ui_manager.update_menu(blaconst.VIEW_QUEUE)\n\n def paste(self, *args, **kwargs):\n self.__add_items(items=self.clipboard, select_rows=True)\n\n def remove(self, *args):\n self.__get_items(remove=True)\n\n def remove_duplicates(self):\n unique = set()\n model = self.__treeview.get_model()\n for row in model:\n uri = row[0].uri\n if uri not in unique:\n unique.add(uri)\n else:\n model.remove(row.iter)\n self.update_queue_positions()\n\n def remove_invalid_tracks(self):\n model = self.__treeview.get_model()\n isfile = os.path.isfile\n\n for row in model:\n uri = row[0].uri\n if not isfile(uri):\n model.remove(row.iter)\n self.update_queue_positions()\n\n def clear(self):\n self.__treeview.get_model().clear()\n self.update_queue_positions()\n\n def get_item(self):\n model = self.__treeview.get_model()\n iterator = model.get_iter_first()\n if iterator:\n item = model[iterator][0]\n model.remove(iterator)\n self.update_queue_positions()\n return item\n return None\n\n @property\n def n_items(self):\n return len(self.__treeview.get_model())\n\nqueue = BlaQueue()\n\n","repo_name":"nkoep/blaplay","sub_path":"blaplay/blagui/blaqueue.py","file_name":"blaqueue.py","file_ext":"py","file_size_in_byte":12614,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"4261500182","text":"from pathlib import PurePath\n\nfrom django.conf import settings\nimport django.contrib.postgres.fields.jsonb\nimport django.contrib.postgres.indexes\nimport django.core.serializers.json\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport mptt.fields\nimport uuid\n\nfrom datahub.core.migration_utils import load_yaml_data_in_migration\n\n\ndef load_initial_statuses(apps, schema_editor):\n load_yaml_data_in_migration(\n apps,\n PurePath(__file__).parent / '0001_initial_statuses.yaml'\n )\n\n\ndef load_initial_policy_area(apps, schema_editor):\n load_yaml_data_in_migration(\n apps,\n PurePath(__file__).parent / '0001_initial_policy_areas.yaml'\n )\n\n\ndef load_initial_policy_issue_type(apps, schema_editor):\n load_yaml_data_in_migration(\n apps,\n PurePath(__file__).parent / '0001_initial_issue_types.yaml'\n )\n\n\ndef load_initial_communication_channels(apps, schema_editor):\n load_yaml_data_in_migration(\n apps,\n PurePath(__file__).parent / '0001_initial_communication_channels.yaml'\n )\n\n\ndef load_service_questions_and_answers(apps, schema_editor):\n load_yaml_data_in_migration(\n apps,\n PurePath(__file__).parent / '0001_service_questions_and_answers.yaml',\n )\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('metadata', '0001_squashed_0010_auto_20180613_1553'),\n ('investment', '0001_squashed_0063_add_created_on_id_index'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('core', '0003_rename_read_permissions'),\n ('event', '0008_add_service'),\n ('company', '0001_squashed_0096_company_global_ultimate_duns_number'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CommunicationChannel',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),\n ('name', models.TextField(blank=True)),\n ('disabled_on', models.DateTimeField(blank=True, null=True)),\n ],\n options={\n 'ordering': ('name',),\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n 
name='ServiceDeliveryStatus',\n fields=[\n ('disabled_on', models.DateTimeField(blank=True, null=True)),\n ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),\n ('name', models.TextField(blank=True)),\n ('order', models.FloatField(default=0.0)),\n ],\n options={\n 'ordering': ('order',),\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='PolicyArea',\n fields=[\n ('disabled_on', models.DateTimeField(blank=True, null=True)),\n ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),\n ('name', models.TextField(blank=True)),\n ('order', models.FloatField(default=0.0)),\n ],\n options={\n 'ordering': ('order',),\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='PolicyIssueType',\n fields=[\n ('disabled_on', models.DateTimeField(blank=True, null=True)),\n ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),\n ('name', models.TextField(blank=True)),\n ('order', models.FloatField(default=0.0)),\n ],\n options={\n 'ordering': ('order',),\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Interaction',\n fields=[\n ('created_on', models.DateTimeField(auto_now_add=True, db_index=True, null=True)),\n ('modified_on', models.DateTimeField(auto_now=True, null=True)),\n ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),\n ('date', models.DateTimeField()),\n ('subject', models.TextField()),\n ('notes', models.TextField(blank=True, max_length=10000)),\n ('company', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='interactions', to='company.Company')),\n ('communication_channel', models.ForeignKey(blank=True, help_text='For interactions only.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='interaction.CommunicationChannel')),\n ('service', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='metadata.Service')),\n ('investment_project', models.ForeignKey(blank=True, help_text='For interactions only.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='interactions', to='investment.InvestmentProject')),\n ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),\n ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),\n ('kind', models.CharField(choices=[('interaction', 'Interaction'), ('service_delivery', 'Service delivery')], max_length=255)),\n ('event', models.ForeignKey(blank=True, help_text='For service deliveries only.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='interactions', to='event.Event')),\n ('archived_documents_url_path', models.CharField(blank=True, help_text='Legacy field. 
File browser path to the archived documents for this interaction.', max_length=255)),\n ('service_delivery_status', models.ForeignKey(blank=True, help_text='For service deliveries only.', null=True, on_delete=django.db.models.deletion.PROTECT, to='interaction.ServiceDeliveryStatus', verbose_name='status')),\n ('grant_amount_offered', models.DecimalField(blank=True, decimal_places=2, help_text='For service deliveries only.', max_digits=19, null=True)),\n ('net_company_receipt', models.DecimalField(blank=True, decimal_places=2, help_text='For service deliveries only.', max_digits=19, null=True)),\n ('policy_areas', models.ManyToManyField(blank=True, related_name='interactions', to='interaction.PolicyArea')),\n ('policy_issue_types', models.ManyToManyField(blank=True, related_name='interactions', to='interaction.PolicyIssueType')),\n ('policy_feedback_notes', models.TextField(blank=True, default='')),\n ('was_policy_feedback_provided', models.BooleanField()),\n ('contacts', models.ManyToManyField(blank=True, related_name='interactions', to='company.Contact')),\n ('status', models.CharField(choices=[('draft', 'Draft'), ('complete', 'Complete')], default='complete', max_length=255)),\n ('archived', models.BooleanField(default=False)),\n ('archived_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),\n ('archived_on', models.DateTimeField(blank=True, null=True)),\n ('archived_reason', models.TextField(blank=True, null=True)),\n ('source', django.contrib.postgres.fields.jsonb.JSONField(blank=True, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True)),\n ('theme', models.CharField(blank=True, choices=[(None, 'Not set'), ('export', 'Export'), ('investment', 'Investment'), ('other', 'Something else')], max_length=255, null=True)),\n ('service_answers', django.contrib.postgres.fields.jsonb.JSONField(blank=True, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True)),\n ],\n options={\n 'abstract': False,\n 'indexes': [\n models.Index(fields=['-date', '-created_on'], name='interaction_date_06c266_idx'),\n models.Index(fields=['modified_on', 'id'], name='interaction_modifie_d52a56_idx'),\n django.contrib.postgres.indexes.GinIndex(fields=['source'], name='interaction_source_cfbd11_gin'),\n models.Index(fields=['company', '-date', '-created_on', 'id'], name='interaction_company_236ca9_idx'),\n ],\n 'default_permissions': ('add_all', 'change_all', 'delete', 'view_all'),\n 'permissions': (('view_associated_investmentproject_interaction', 'Can view interaction for associated investment projects'), ('add_associated_investmentproject_interaction', 'Can add interaction for associated investment projects'), ('change_associated_investmentproject_interaction', 'Can change interaction for associated investment projects'), ('export_interaction', 'Can export interaction')),\n },\n ),\n migrations.CreateModel(\n name='InteractionDITParticipant',\n fields=[\n ('id', models.BigAutoField(primary_key=True, serialize=False)),\n ('adviser', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),\n ('interaction', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dit_participants', to='interaction.Interaction')),\n ('team', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='metadata.Team')),\n ],\n options={\n 'default_permissions': (),\n 'unique_together': {('interaction', 
'adviser')},\n },\n ),\n migrations.CreateModel(\n name='ServiceQuestion',\n fields=[\n ('disabled_on', models.DateTimeField(blank=True, null=True)),\n ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),\n ('name', models.TextField(blank=True)),\n ('order', models.FloatField(default=0.0)),\n ('service', mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='interaction_questions', to='metadata.Service',)),\n ],\n options={\n 'ordering': ('order',),\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='ServiceAnswerOption',\n fields=[\n ('disabled_on', models.DateTimeField(blank=True, null=True)),\n ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),\n ('name', models.TextField(blank=True)),\n ('order', models.FloatField(default=0.0)),\n ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answer_options', to='interaction.ServiceQuestion')),\n ],\n options={\n 'ordering': ('order',),\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='ServiceAdditionalQuestion',\n fields=[\n ('disabled_on', models.DateTimeField(blank=True, null=True)),\n ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),\n ('name', models.TextField(blank=True)),\n ('order', models.FloatField(default=0.0)),\n ('type', models.CharField(choices=[('text', 'Text'), ('money', 'Money')], max_length=255)),\n ('is_required', models.BooleanField(default=False)),\n ('answer_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='additional_questions', to='interaction.ServiceAnswerOption')),\n ],\n options={\n 'ordering': ('order',),\n 'abstract': False,\n },\n ),\n migrations.RunPython(\n code=load_initial_statuses,\n reverse_code=migrations.RunPython.noop,\n ),\n migrations.RunPython(\n code=load_initial_policy_area,\n reverse_code=migrations.RunPython.noop,\n ),\n migrations.RunPython(\n code=load_initial_policy_issue_type,\n reverse_code=migrations.RunPython.noop,\n ),\n migrations.RunPython(\n code=load_initial_communication_channels,\n reverse_code=migrations.RunPython.noop,\n ),\n migrations.RunPython(\n code=load_service_questions_and_answers,\n reverse_code=migrations.RunPython.noop,\n ),\n ]\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/interaction/migrations/0001_squashed_0068_remove_interaction_location_from_database.py","file_name":"0001_squashed_0068_remove_interaction_location_from_database.py","file_ext":"py","file_size_in_byte":12905,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"3317947622","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.contrib.gis.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tms', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Ne50MOcean',\n fields=[\n ('gid', models.AutoField(serialize=False, primary_key=True)),\n ('scalerank', models.DecimalField(null=True, max_digits=10, decimal_places=0, blank=True)),\n ('featurecla', models.CharField(max_length=32, null=True, blank=True)),\n ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326, null=True, blank=True)),\n ],\n options={\n 'db_table': 'ne_50m_ocean',\n 'managed': False,\n },\n ),\n 
]\n","repo_name":"LiliyaStefanova/project_navinur","sub_path":"navinur/tms/migrations/0002_ne50mocean.py","file_name":"0002_ne50mocean.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26885922807","text":"from flask import Flask, render_template, url_for, redirect, abort, flash, jsonify, request\nfrom flask_bootstrap import Bootstrap\nfrom forms import CreateColivingForm, RegisterForm, LoginForm\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.orm import relationship\nfrom flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user\nfrom functools import wraps\nfrom datetime import date\nfrom flask_ckeditor import CKEditor\nimport random\n\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'\nBootstrap(app)\n\n##CONNECT TO DB\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///colivings.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nckeditor = CKEditor(app)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n\n# to be defined\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n## database tables\nclass User(UserMixin, db.Model):\n __tablename__ = \"users\"\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(100), unique=True)\n password = db.Column(db.String(100))\n name = db.Column(db.String(100))\n posts = relationship(\"ColivingPost\", back_populates=\"author\")\n #comments = relationship(\"Comment\", back_populates=\"comment_author\")\n\n\nclass ColivingPost(db.Model):\n __tablename__ = \"coliving_posts\"\n id = db.Column(db.Integer, primary_key=True)\n author_id = db.Column(db.Integer, db.ForeignKey(\"users.id\"))\n author = relationship(\"User\", back_populates=\"posts\")\n title = db.Column(db.String(250), unique=True, nullable=False)\n location = db.Column(db.String(500), unique=True, nullable=False)\n description_short = db.Column(db.String(250), nullable=False)\n description_long = db.Column(db.String(250), nullable=False)\n urbanlife = db.Column(db.String(250), nullable=False)\n changelocations = db.Column(db.String(250), nullable=False)\n coffee = db.Column(db.String(250), nullable=False)\n wifi = db.Column(db.String(250), nullable=False)\n sockets = db.Column(db.String(250), nullable=False)\n vibe = db.Column(db.String(250), nullable=False)\n date = db.Column(db.String(250), nullable=False)\n img_url = db.Column(db.String(250), nullable=False)\n\n def to_dict(self):\n return {column.name: getattr(self, column.name) for column in self.__table__.columns}\ndb.create_all()\n\n@app.route('/')\ndef get_all_colivings():\n colivings = ColivingPost.query.all()\n return render_template(\"index.html\", all_colivings=colivings, current_user=current_user)\n\n@app.route('/login', methods=[\"GET\", \"POST\"])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n email = form.email.data\n password = form.password.data\n\n user = User.query.filter_by(email=email).first()\n # Email doesn't exist or password incorrect.\n if not user:\n flash(\"That email does not exist, please try again.\")\n return redirect(url_for('login'))\n elif not check_password_hash(user.password, password):\n flash('Password incorrect, please try again.')\n return redirect(url_for('login'))\n else:\n login_user(user)\n return 
redirect(url_for('get_all_colivings'))\n    return render_template(\"login.html\", form=form, current_user=current_user)\n\n\ndef admin_only(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        # Anonymous users have no id attribute, so check authentication first\n        if not current_user.is_authenticated or current_user.id != 1:\n            return abort(403)\n        return f(*args, **kwargs)\n    return decorated_function\n\n\n@app.route('/register', methods=[\"GET\", \"POST\"])\ndef register():\n    form = RegisterForm()\n    if form.validate_on_submit():\n\n        if User.query.filter_by(email=form.email.data).first():\n            print(User.query.filter_by(email=form.email.data).first())\n            #User already exists\n            flash(\"You've already signed up with that email, log in instead!\")\n            return redirect(url_for('login'))\n\n        hash_and_salted_password = generate_password_hash(\n            form.password.data,\n            method='pbkdf2:sha256',\n            salt_length=8\n        )\n        new_user = User(\n            email=form.email.data,\n            name=form.name.data,\n            password=hash_and_salted_password,\n        )\n        db.session.add(new_user)\n        db.session.commit()\n        login_user(new_user)\n        return redirect(url_for(\"get_all_colivings\"))\n\n    return render_template(\"register.html\", form=form, current_user=current_user)\n\n\n\n@app.route(\"/about\")\ndef about():\n    return render_template(\"about.html\", current_user=current_user)\n\n@app.route(\"/coliving/<int:coliving_id>\", methods=[\"GET\", \"POST\"])\ndef show_coliving(coliving_id):\n    requested_coliving = ColivingPost.query.get(coliving_id)\n\n    return render_template(\"post.html\", coliving=requested_coliving, current_user=current_user)\n\n\n@app.route(\"/edit-coliving/<int:coliving_id>\", methods=[\"GET\", \"POST\"])\n@admin_only\n@login_required\ndef edit_coliving(coliving_id):\n    entry = ColivingPost.query.get(coliving_id)\n    edit_form = CreateColivingForm(\n        title=entry.title,\n        location=entry.location,\n        description_short=entry.description_short,\n        description_long=entry.description_long,\n        urbanlife=entry.urbanlife,\n        changelocations=entry.changelocations,\n        wifi=entry.wifi,\n        coffee=entry.coffee,\n        sockets=entry.sockets,\n        vibe=entry.vibe,\n        author=current_user,\n        img_url=entry.img_url,\n    )\n\n    if edit_form.validate_on_submit():\n        entry.title = edit_form.title.data\n        entry.location = edit_form.location.data\n        entry.description_short = edit_form.description_short.data\n        entry.description_long = edit_form.description_long.data\n        entry.urbanlife = edit_form.urbanlife.data\n        entry.changelocations = edit_form.changelocations.data\n        entry.wifi = edit_form.wifi.data\n        entry.coffee = edit_form.coffee.data\n        entry.sockets = edit_form.sockets.data\n        entry.vibe = edit_form.vibe.data\n        entry.img_url = edit_form.img_url.data\n        db.session.commit()\n        return redirect(url_for(\"show_coliving\", coliving_id=entry.id))\n\n    return render_template(\"make-post.html\", form=edit_form, is_edit=True, current_user=current_user)\n\n\n\n\n@app.route('/logout')\ndef logout():\n    logout_user()\n    return redirect(url_for('get_all_colivings'))\n\n\n@app.route(\"/new-post\", methods=[\"GET\", \"POST\"])\n@admin_only\ndef add_new_coliving():\n    form = CreateColivingForm()\n    if form.validate_on_submit():\n        new_post = ColivingPost(\n            title=form.title.data,\n            location=form.location.data,\n            description_short=form.description_short.data,\n            description_long=form.description_long.data,\n            urbanlife=form.urbanlife.data,\n            changelocations=form.changelocations.data,\n            wifi=form.wifi.data,\n            coffee=form.coffee.data,\n            sockets=form.sockets.data,\n            vibe=form.vibe.data,\n            author=current_user,\n            img_url=form.img_url.data,\n            date=date.today().strftime(\"%B %d, %Y\")\n        )\n        db.session.add(new_post)\n        db.session.commit()\n        return redirect(url_for(\"get_all_colivings\"))\n\n    return render_template(\"make-post.html\", form=form, current_user=current_user)\n\n@app.route(\"/delete/<int:coliving_id>\")\n@admin_only\ndef delete_coliving(coliving_id):\n    post_to_delete = ColivingPost.query.get(coliving_id)\n    db.session.delete(post_to_delete)\n    db.session.commit()\n    return redirect(url_for('get_all_colivings'))\n\n#APIs\n@app.route('/api/info')\ndef explain_the_api():\n    colivings = ColivingPost.query.all()\n    return render_template(\"api.html\", all_colivings=colivings, current_user=current_user)\n\n\n@app.route(\"/API/random\")\ndef API_random_coliving():\n    colivings = ColivingPost.query.all()\n    random_coliving = random.choice(colivings)\n    return jsonify(cafe=random_coliving.to_dict())\n\n\n@app.route(\"/API/all\")\ndef API_all_colivings():\n    colivings = ColivingPost.query.all()\n    return jsonify(cafes=[coliving.to_dict() for coliving in colivings])\n\n\n@app.route(\"/API/search\")\ndef get_cafe_at_location():\n    coliving_name = request.args.get(\"loc\")\n    coliving = db.session.query(ColivingPost).filter_by(title=coliving_name).first()\n    if coliving:\n        return jsonify(coliving=coliving.to_dict())\n    else:\n        return jsonify(error={\"Not Found\": \"Sorry, we don't have a cafe at that location.\"}), 404\n\n#@app.route(\"/API/search\")\n#def get_cafe_at_location():\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n\n","repo_name":"Jarmo2/100_days_of_code_coding","sub_path":"day-87/website_for_colivings/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72184112586","text":"import numpy as np\nimport commpy as cp\n\n\ndef get_ber_bler(estimated_bits, original_bits):\n    \"\"\"Compute Bit Error Rate and Block Error Rate.\n\n    Arguments:\n    ----------\n    estimated_bits: int - size [batch, data_len]\n    original_bits: int - size [batch, data_len]\n\n    Returns:\n    ber: float - Bit Error Rate\n    bler: float - Block Error Rate\n    \"\"\"\n    hamming_distances = []\n    for i in range(len(original_bits)):\n        distance = cp.utilities.hamming_dist(original_bits[i].astype(int),\n                                             estimated_bits[i].astype(int))\n        hamming_distances.append(distance)\n    ber = np.sum(hamming_distances) / np.product(np.shape(original_bits))\n    bler = np.count_nonzero(hamming_distances) / len(original_bits)\n    return ber, bler\n","repo_name":"laurabrink13/ML-Receiver","sub_path":"radioml/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"8969191082","text":"DEBUG = True\n\nimport math\n\n\ndef FuelToLaunch(mass):\n    return (math.floor(mass / 3)) - 2\n\n\ndef AdvancedFuelToLaunch(mass):\n    # this is recursive\n\n    # fuel cost for the module, this is constant\n    fuel_mass = max(FuelToLaunch(mass), 0)\n\n    fuel_fuel = 0\n    if fuel_mass > 0:\n        fuel_fuel = AdvancedFuelToLaunch(fuel_mass)\n\n    return fuel_fuel + fuel_mass\n\n\ndef read_src():\n    src_file = \"example.txt\" if DEBUG else \"src.txt\"\n    with open(src_file, \"r\") as fh:\n        content = fh.read().splitlines()\n\n    modules = [int(l) for l in content]\n    return modules\n\n\ndef task_1():\n    modules = read_src()\n    total_fuel = sum([FuelToLaunch(m) for m in modules])\n    print(f\"task 1: {total_fuel}\")\n\n\ndef task_2():\n    modules = read_src()\n    total_fuel = sum([AdvancedFuelToLaunch(m) for m in modules])\n    print(f\"task 2: {total_fuel}\")\n\n\nif __name__ == \"__main__\":\n    DEBUG = False\n 
task_1()\n task_2()\n","repo_name":"Robtom5/AoC","sub_path":"AOC_2019/Day01/soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38817778268","text":"\"\"\"\nImage processing module for the 2DOF platform.\n\"\"\"\n\nimport cv2\nimport ConfigParser\nimport csv\nimport time\nfrom datetime import datetime\n\nclass CameraFeedback:\n \"\"\"\n Image processing class. Provides ball localization.\n \"\"\"\n\n def __init__(self, config_file, log = False):\n \"\"\"\n Initialize image processing from config_file settings.\n \"\"\"\n\n # Platform parameters\n config = ConfigParser.RawConfigParser()\n config.read(config_file)\n self.x = config.getfloat('Controller','x0')\n self.y = config.getfloat('Controller','y0')\n \n # Camera parameters\n self.cam_idx = config.getint('Camera','cam_idx')\n self.ax = config.getfloat('Camera','ax')\n self.bx = config.getfloat('Camera','bx')\n self.ay = config.getfloat('Camera','ay')\n self.by = config.getfloat('Camera','by')\n self.thresh = config.getint('Camera','thresh')\n org = config.get('Camera','rect_org_px')\n self.rect_org_px = tuple(int(i) for i in org.strip('()').split(','))\n end = config.get('Camera','rect_end_px')\n self.rect_end_px = tuple(int(i) for i in end.strip('()').split(','))\n self.ball_area_min = config.getint('Camera','ball_area_min')\n self.ball_area_max = config.getint('Camera','ball_area_max')\n self.x_px = self.rect_org_px[0] + int(self.rect_end_px[0] - self.rect_org_px[0])/2\n self.y_px = self.rect_org_px[1] + int(self.rect_end_px[1] - self.rect_org_px[1])/2\n\n # Algorithm parameters\n self.detected = False\n self.show_on = False\n self.debug_on = False\n self.__log_on = log\n\n\t\t# Data logging\n if self.__log_on:\n now_str = datetime.now().__str__().split('.')[0]\n now_str = now_str.replace(' ','-').replace(':','-')\n self.logfile = open(now_str + '_camera_log.csv','wb')\n self.logger = csv.writer(self.logfile, delimiter=';')\n\t\t\n self._capture = cv2.VideoCapture(self.cam_idx)\n if not self._capture:\n raise Exception(\"Can't capture from Camera {0}!\".format(self.cam_idx))\n\n (success, self._image) = self._capture.read()\n self.decorated_image = self._image.copy() # Image for displaying\n self._image_gray = cv2.cvtColor(self._image, cv2.COLOR_RGB2GRAY)\n (ret, self._image_thresh) = cv2.threshold(self._image_gray, self.thresh,\n 255, cv2.THRESH_BINARY)\n self.contours = []\n \n def update(self):\n \"\"\"\n Update ball position reading from camera image.\n\n Returns ball position as (x,y) tuple.\n \"\"\"\n\n self.detected = False\n ### Perform image segmentation to obtain contours ###\n (success, self._image) = self._capture.read()\n cv2.cvtColor(self._image, cv2.COLOR_RGB2GRAY, self._image_gray)\n cv2.threshold(self._image_gray, self.thresh, 255,\n cv2.THRESH_BINARY, self._image_thresh)\n cv2.inRange(self._image_thresh, (240), (255), self._image_thresh)\n # Gaussian blurring removes small artefacts?\n cv2.GaussianBlur(self._image_thresh, (9,9), 0, self._image_thresh)\n [self.contours,hierarchy] = cv2.findContours(self._image_thresh[self.rect_org_px[1]:self.rect_end_px[1],\n self.rect_org_px[0]:self.rect_end_px[0]],\n cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n cv2.drawContours(self._image_thresh, self.contours, -1, (255,0,0), -1,\n offset = self.rect_org_px)\n\n ### Process contours to find the right one (the ball on the plate) ###\n for cont in self.contours:\n m = cv2.moments(cont)\n area = m['m00']\n if 
(area > self.ball_area_min) and (area < self.ball_area_max):\n self.x_px = int(m['m10']/area) + self.rect_org_px[0]\n self.y_px = int(m['m01']/area) + self.rect_org_px[1]\n self.detected = True\n self.x = (self.x_px - self.bx) / self.ax\n self.y = (self.y_px - self.by) / self.ay\n break\n \n if self.__log_on:\n self.logger.writerow([time.time(),self.x,self.y])\n\t\t\n self.show_image()\n return (self.x, self.y)\n \n def run(self):\n \"\"\"\n Calls the update method in a loop and prints\n estimated (x,y) coordinates.\n \"\"\"\n while True:\n self.update()\n #print(self.x, self.y)\n if cv2.waitKey(1) & 0xFF == 27:\n break\n\n self.cleanup()\n \n def show_image(self):\n \"\"\"\n Show original image.\n \"\"\"\n \n # Always decorate the image, because somebody else might be showing it.\n self.decorate_image()\n if self.show_on:\n cv2.namedWindow('Camera image', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('Camera image', self.decorated_image)\n if self.debug_on:\n cv2.namedWindow('Grayscale image', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('Grayscale image', self._image_gray)\n cv2.namedWindow('Thresholded image', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('Thresholded image', self._image_thresh)\n \n def decorate_image(self):\n \"\"\"\n Decorate original image with tracking area and ball estimate.\n \"\"\"\n self.decorated_image = self._image.copy()\n cv2.rectangle(self.decorated_image, self.rect_org_px, self.rect_end_px, (0,0,255), 2)\n cv2.circle(self.decorated_image, (self.x_px,self.y_px), 10, (0,0,255), 2)\n \n def cleanup(self):\n self._capture.release()\n cv2.destroyAllWindows()\n if self.__log_on:\n self.logfile.close()\n \nif __name__ == '__main__':\n\n cam = CameraFeedback('platform.cfg', log=True)\n cam.show_on = True\n cam.debug_on = True\n \n cam.run()\n \n","repo_name":"br5555/oiu","sub_path":"2dof_platform/oiu-lab1-014/camera_feedback.py","file_name":"camera_feedback.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8860200353","text":"from collections import defaultdict\n\nimport click\n\nfrom gcs_metric_extract.cli import default_command\nfrom gcs_metric_extract.output import sanitize_format\n\nOPTIONS = defaultdict(bool)\n\n\n@click.group\n@click.option(\n \"--format\",\n help=\"Format of the output. Valid options are json, ldjson, and csv.\")\n@click.option(\"--lookback\",\n help=\"(Optional) Number of seconds to look back in the query. \"\n \"Use to limit data sent. Default: 660\",\n type=int,\n default=660)\n@click.option(\n \"--points\",\n help=\"(Optional) Number of points to report back. Useful for getting a \"\n \"fixed number of points when your lookback may not always return the same \"\n \"number. Use -1 for all. 
Default: 1.\",\n type=int,\n default=1)\ndef main(format: str, lookback: int, points: int):\n OPTIONS[\"FORMAT\"] = sanitize_format(format)\n OPTIONS[\"LOOKBACK\"] = lookback\n OPTIONS[\"POINTS\"] = points\n\n\n@main.command(help=\"Get the number of API calls made against a bucket.\")\n@click.argument(\"project_ids\", nargs=-1)\ndef api_request_count(project_ids):\n metric_url = \"storage.googleapis.com/api/request_count\"\n resource_label = \"bucket_name\"\n metric_labels = [\"method\", \"response_code\"]\n return default_command(OPTIONS, project_ids, metric_url, resource_label,\n metric_labels)\n\n\n@main.command(help=\"Get the number of objects in a bucket.\")\n@click.argument(\"project_ids\", nargs=-1)\ndef object_count(project_ids):\n metric_url = \"storage.googleapis.com/storage/object_count\"\n resource_label = \"bucket_name\"\n metric_labels = [\"storage_class\"]\n return default_command(OPTIONS, project_ids, metric_url, resource_label,\n metric_labels)\n\n\n@main.command(help=\"Get the number of byte-seconds used by a bucket.\")\n@click.argument(\"project_ids\", nargs=-1)\ndef total_byte_seconds(project_ids):\n metric_url = \"storage.googleapis.com/storage/total_byte_seconds\"\n resource_label = \"bucket_name\"\n metric_labels = [\"storage_class\"]\n return default_command(OPTIONS, project_ids, metric_url, resource_label,\n metric_labels)\n\n\n@main.command(help=\"Get the number of bytes stored in a bucket.\")\n@click.argument(\"project_ids\", nargs=-1)\ndef total_bytes(project_ids):\n metric_url = \"storage.googleapis.com/storage/total_bytes\"\n resource_label = \"bucket_name\"\n metric_labels = [\"storage_class\"]\n return default_command(OPTIONS, project_ids, metric_url, resource_label,\n metric_labels)\n\n\n@main.command(help=\"Get any GCS metric. \"\n \"Give multiple METRIC_LABELS as quoted, comma delimited list.\")\n@click.argument(\"metric_url\", nargs=1)\n@click.argument(\"resource_label\", nargs=1)\n@click.argument(\"metric_labels\", nargs=1)\n@click.argument(\"project_ids\", nargs=-1)\ndef get_metric(metric_url, resource_label, metric_labels, project_ids):\n metric_labels = metric_labels.split(\",\")\n return default_command(OPTIONS, project_ids, metric_url, resource_label,\n metric_labels)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"domZippilli/gcs_metric_extract","sub_path":"gcs_metric_extract/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"14297233911","text":"system = \"\" # no system set\n\n# Choose your system by uncommenting one of the following (i.e. 
delete\n# the leading two characters '# ')\n# system = \"volumio\"\n# system = \"moode\"\n# system = \"mpd\"\n\n# Set your dial parameters\ndivs = 100 # Number of volume divisions\nmin_val = 100 # ADS1X15 reading for volume 0\nmax_val = 26300 # ADS1X15 reading for volume 100\nmax_volume = 100 # Maximum value for the volume\n\n# Shouldn't need to change anything below\n\ndiff = max_val - min_val\nfor div in range (divs + 1):\n    frac = float(div)/divs\n    volume = int(max_volume * frac)\n    val = min_val + int(frac * diff)\n    if system == \"volumio\":\n        print(\"%05d = volume_%03d,volumio volume %d\" %(val, volume, volume))\n    elif system == \"moode\":\n        print(\"%05d = volume_%03d,mpc -q volume %d && \" % (val, volume, volume) +\n              \"sqlite3 /var/local/www/db/moode-sqlite3.db \" +\n              \"\\\"UPDATE cfg_system SET value='%d' \" % (volume) +\n              \"WHERE param='volknob'\\\"\")\n    elif system == \"mpd\":\n        print(\"%05d = volume_%03d,mpc -q volume %d\" % (val, volume, volume))\n    else:\n        print(\"No system set, edit the script to set the 'system' variable\")\n        break\n","repo_name":"antiprism/turnandrun","sub_path":"doc/resources/vols_conf.py","file_name":"vols_conf.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"72744194826","text":"import sys\nsys.setrecursionlimit(1000000)\nm, n = map(int,sys.stdin.readline().split())\npicture = [list(map(int, sys.stdin.readline().split())) for _ in range(m)]\nresult = []\ndef dfs(x,y):\n    global cnt\n    if x < 0 or x >= m or y < 0 or y >= n:\n        return False\n    if picture[x][y] == 1:\n        picture[x][y] = 0\n        cnt +=1\n        dfs(x-1,y)\n        dfs(x,y-1)\n        dfs(x+1,y)\n        dfs(x,y+1)\n        return True\n    return False\ncnt = 0\nfor i in range(m):\n    for j in range(n):\n        if picture[i][j] == 1:\n            dfs(i,j)\n            result.append(cnt)\n            cnt = 0\nprint(len(result))\nif len(result) == 0:\n    print(0)\nelse:\n    print(max(result))\n","repo_name":"kangjunyoung37/algorizm-python","sub_path":"탐색/1926번 그림.py","file_name":"1926번 그림.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12547789721","text":"login_correto = 'Dirack'\nsenha_correto = '12345'\n\nwhile True:\n\tlogin = input('Login: ')\n\tsenha = input('Password: ')\n\n\tif login != login_correto:\n\t\tprint('Error: incorrect login')\n\t\tcontinue\n\telif senha != senha_correto:\n\t\tprint('Error: incorrect password')\n\t\tcontinue\n\telse:\n\t\tprint('Logged in successfully!')\n\t\tbreak\n","repo_name":"Geofisicando/Automatize-tarefas-macantes-com-Python","sub_path":"examples/cap2/loop/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"21935195524","text":"from flask import Flask,redirect,url_for,request,render_template\r\n\r\napp=Flask(__name__)\r\n\r\n@app.route('/')\r\ndef hello1():\r\n    return 'Hi'\r\n\r\n@app.route('/<name>/')\r\ndef hello_world(name):\r\n    dict1 = {'phy':50,'che':60,'maths':70}\r\n    return render_template('page1.html', user = name,result=dict1)\r\n    \"\"\"if name=='santhosh':\r\n        return '
hello %s' % name\r\n    else:\r\n        return 'hello guest %s
' % name\"\"\"\r\n\r\n@app.route('/san/<user>',methods=['POST','GET'])\r\ndef san(user):\r\n    if request.method == 'POST':\r\n        user = request.form['nm']\r\n        return redirect(url_for('hello_world',name = user))\r\n    else:\r\n        user = request.args.get('nm')\r\n        return redirect(url_for('hello_world',name = user))\r\n    #return redirect(url_for('hello_world',name=user))\r\n    #render_template('hello.html')\r\n\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n    app.run(port=5000,debug=True,use_reloader=False)","repo_name":"Santhosh-A-K/Flask","sub_path":"Flask/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74794573705","text":"class Solution:\n    def partition(self, s: str) -> List[List[str]]:\n        '''\n        backtracking + dp\n        '''\n\n        ans = []\n        dp = collections.defaultdict(bool)\n\n\n        def isPalindrome(start, end):\n            end -= 1\n            while start < end:\n                if s[start] != s[end]:\n                    return False\n                start += 1\n                end -= 1\n            return True\n\n\n        def backtracking(start, curList):\n            if start >= len(s):\n                ans.append(curList[:])\n                return\n\n            for end in range(start, len(s)):\n                if s[end] == s[start] and (end-start <= 2 or dp[start+1, end-1] == True):\n                    dp[start, end] = True\n                    curList.append(s[start:end+1])\n                    backtracking(end+1, curList)\n                    curList.pop()\n\n        backtracking(0, [])\n        return ans\n","repo_name":"novayo/LeetCode","sub_path":"0131_Palindrome_Partitioning/try_3.py","file_name":"try_3.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"74393878665","text":"import numpy as np\nimport pandas as pd\nfrom .utils import *\nimport networkx as nx\nfrom scipy.stats import expon, gamma\n\n\n\n# Behaviors\ndef driving_process(params, step, sL, s):\n    '''\n    Driving process for adding new participants (their funds) and new proposals.\n    '''\n    arrival_rate = 10/(1+s['sentiment'])\n    rv1 = np.random.rand()\n    new_participant = bool(rv1<1/arrival_rate)\n\n    network = s['network']\n\n    proposals = get_nodes_by_type(network, 'proposal')\n    participants = get_nodes_by_type(network, 'participant')\n\n    candidate_proposals = [j for j in proposals if network.nodes[j]['status']=='candidate']\n    subgraph_nodes = candidate_proposals+participants\n\n    candidate_subgraph = s['network'].subgraph(subgraph_nodes)\n    supporters = get_edges_by_type(candidate_subgraph, 'support')\n\n    available_supply = s['total_supply']-s['effective_supply']\n\n    expected_holdings = .01*available_supply\n    if new_participant:\n        h_rv = expon.rvs(loc=0.0, scale=expected_holdings)\n        new_participant_holdings = h_rv\n    else:\n        new_participant_holdings = 0\n\n    network = s['network']\n    affinities = [network.edges[e]['affinity'] for e in supporters if e[1] in candidate_proposals]\n    median_affinity = np.median(affinities)\n\n    fund_requests = [network.nodes[j]['funds_requested'] for j in candidate_proposals]\n\n    funds = s['funds']\n    total_funds_requested = np.sum(fund_requests)\n\n    if total_funds_requested == 0:\n        new_proposal = True\n        new_proposal_ct = 3\n    else:\n        proposal_rate = 1/(1-median_affinity) * total_funds_requested/funds\n        rv2 = np.random.rand()\n        new_proposal = bool(rv2<1/proposal_rate)\n        new_proposal_ct = int(1-median_affinity)+1\n\n    expected_request = params['beta']*s['funds']/10\n    new_proposal_requested = [expon.rvs(loc=expected_request/10, scale=expected_request) for ct in range(new_proposal_ct)]\n\n    sentiment = s['sentiment']\n    funds = 
s['funds']\n scale_factor = funds*sentiment**2/10000\n \n \n return({'new_participant':new_participant, #True/False\n 'new_participant_holdings':new_participant_holdings, #funds held by new participant if True\n 'new_proposal':new_proposal, #True/False\n 'new_proposal_ct': new_proposal_ct, #int\n 'new_proposal_requested':new_proposal_requested, #list funds requested by new proposal if True, len =ct\n }) \n \n# Mechanisms \ndef update_network(params, step, sL, s, _input):\n '''\n Add new participants and proposals to network object\n '''\n\n network = s['network']\n funds = s['funds']\n supply = s['effective_supply']\n\n new_participant = _input['new_participant'] \n new_proposal = _input['new_proposal']\n\n if new_participant:\n new_participant_holdings = _input['new_participant_holdings']\n network = gen_new_participant(network, new_participant_holdings)\n \n if new_proposal:\n for ct in range(_input['new_proposal_ct']):\n funds_req = _input['new_proposal_requested'][ct]\n network= gen_new_proposal(network,funds,supply, funds_req,params)\n \n #update age of the existing proposals\n proposals = get_nodes_by_type(network, 'proposal')\n \n for j in proposals:\n network.nodes[j]['age'] = network.nodes[j]['age']+1\n if network.nodes[j]['status'] == 'candidate':\n requested = network.nodes[j]['funds_requested']\n network.nodes[j]['trigger'] = trigger_threshold(requested, funds, supply, params['alpha'],params)\n else:\n network.nodes[j]['trigger'] = np.nan\n \n key = 'network'\n value = network\n \n return (key, value)\n\n\ndef increment_supply(params, step, sL, s, _input):\n '''\n Increase supply by the amount of the new particpant's funds.\n '''\n supply = s['effective_supply']\n\n if _input['new_participant_holdings']:\n supply = supply + _input['new_participant_holdings']\n \n key = 'effective_supply'\n value = supply\n \n return (key, value)\n\n# Behaviors\n# Substep 2\ndef minting_rule(params, step, sL, s):\n supply = s['total_supply']\n tokens_to_mint = params['gamma'] * supply #order 0.001 or smaller: expansion of supply per day \n return ({'mint':tokens_to_mint})\n\n# Mechanisms \ndef mint_to_supply(params, step, sL, s, _input):\n mint = _input['mint']\n supply = s['total_supply']\n\n key = 'total_supply'\n value = supply + mint\n \n return (key, value)\n\ndef mint_to_funds(params, step, sL, s, _input):\n mint = _input['mint']\n funds = s['funds']\n\n key = 'funds'\n value = funds + mint\n \n return (key, value)","repo_name":"1Hive/conviction-voting-cadcad","sub_path":"models/v3/model/parts/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"81"} +{"seq_id":"40296737578","text":"import pymongo\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nclient = MongoClient('mongodb://localhost:27017/')\ndb = client['bot']\narticle = {\"author\": \"Derrick Mwiti\",\n \"about\": \"Introduction to MongoDB and Python\",\n \"tags\":\n [\"mongodb\", \"python\", \"pymongo\"]}\narticles = db.articles\nquery = { \"author\": \"Derrick Mwiti\" }\nnew_author = { \"$set\": { \"author\": \"John David\" } }\n\narticles.update_one(query, new_author)\n\nfor article in articles.find():\n print(article)","repo_name":"paveltsytovich/telegram-course","sub_path":"Code/Module 8/Live/mongo-update.py","file_name":"mongo-update.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
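The pymongo record above issues an update_one and discards the result it returns; a minimal sketch of checking that result, plus the update_many variant, follows (the connection URI, the bot database, and the articles collection are assumptions that simply mirror the snippet above):

from pymongo import MongoClient

# Connection details mirror the snippet above; assumes a local MongoDB server.
client = MongoClient('mongodb://localhost:27017/')
articles = client['bot'].articles

query = {"author": "Derrick Mwiti"}
new_author = {"$set": {"author": "John David"}}

# update_one returns an UpdateResult: matched_count reports whether the
# filter found a document, modified_count whether anything actually changed.
result = articles.update_one(query, new_author)
print(result.matched_count, result.modified_count)

# update_many applies the same modification to every matching document.
result = articles.update_many(query, new_author)
print(result.modified_count)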
+{"seq_id":"22388737679","text":"import datetime\n\n#clean price\ndef clean_price(price_str):\n try:\n split_price = price_str.split('$')\n if len(split_price) != 1:\n price_float = float(split_price[1])\n else:\n raise ValueError\n except ValueError:\n input('''\n \\n**********PRICE ERROR **********\n \\rThe price should be a number with a currency symbol\n \\rEX: $10.99\n \\rPress enter to try again\n \\r*******************************''')\n return\n else:\n return int(price_float * 100)\n\n\n# clean date\ndef clean_date(date_str):\n try:\n split_date = date_str.split(\"/\")\n month = int(split_date[0])\n day = int(split_date[1])\n year = int(split_date[2])\n return_date = datetime.date(year, month, day)\n\n except ValueError:\n input('''\n \\n**********DATE ERROR **********\n \\rThe date format should include a valid month, date, and year in the past, separated by a forward slash\n \\rEX: 2/3/2023\n \\rPress enter to try again\n \\r*******************************''')\n return\n else:\n return return_date\n\n\n#clean id\ndef clean_id(id_str, options):\n try:\n product_id = int(id_str)\n except ValueError:\n input('''\n \\n**********ID ERROR **********\n \\rThe ID should be a number\n \\rPress enter to try again\n \\r*******************************''')\n return\n else:\n if product_id in options:\n return product_id\n else:\n input(f'''\n \\n**********ID ERROR **********\n \\r The ID should be a valid selection from the list below\n \\rOptions: {options}\n \\rPress enter to try again\n \\r*******************************''')\n return\n\ndef clean_quantity(value):\n try:\n quantity = int(value)\n except ValueError:\n input('''\n \\n**********Quantity ERROR **********\n \\rThe Quantity should be a number\n \\rPress enter to try again\n \\r*******************************''')\n return\n else:\n return quantity","repo_name":"HuckleberryKBT/Grocery-Store-Inventory","sub_path":"cleaners.py","file_name":"cleaners.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25296070935","text":"# -*- coding: utf-8 -*-\r\nimport sqlite3\r\nimport re\r\n\r\nfrom config import debug\r\n\r\n\r\nclass SQLighter:\r\n\r\n def regexp(self, expr, item):\r\n reg = re.compile(expr)\r\n return reg.search(item) is not None\r\n\r\n def __init__(self, database):\r\n self.connection = sqlite3.connect(database)\r\n self.cursor = self.connection.cursor()\r\n self.connection.create_function(\"REGEXP\", 2, self.regexp)\r\n\r\n def insert_data(self, idRoute='', points='', countPoints='', time=''):\r\n \"\"\"добавление в базу данных\"\"\"\r\n with self.connection:\r\n sql = \"\"\"INSERT INTO routers(idRoute,points,countPoints,time)\r\n VALUES ('%(idRoute)s','%(points)s','%(countPoints)s','%(time)s');\r\n \"\"\" % {\"idRoute\": str(idRoute), \"points\": points, \"countPoints\": countPoints, \"time\": time}\r\n if debug == 1: print(sql)\r\n self.cursor.execute(sql)\r\n self.connection.commit()\r\n\r\n def select_all(self, table):\r\n \"\"\" Получаем все строки \"\"\"\r\n with self.connection:\r\n return self.cursor.execute(f'SELECT * FROM {table}').fetchall()\r\n\r\n def select_single(self, rownum, table):\r\n \"\"\" Получаем одну строку с id rownum \"\"\"\r\n with self.connection:\r\n if debug == 1: print('SQL ' + 'SELECT * FROM ' + table + ' WHERE id = ', rownum)\r\n return self.cursor.execute('SELECT * FROM ' + table + ' WHERE id = ?', (rownum,)).fetchall()[0]\r\n\r\n def count_rows(self, table):\r\n \"\"\" Считаем количество 
строк \"\"\"\r\n with self.connection:\r\n result = self.cursor.execute('SELECT * FROM ' + table).fetchall()\r\n return len(result)\r\n\r\n def select_routes(self, *points, time=0):\r\n \"\"\" Получаем все строки \"\"\"\r\n sqlPoints = \"\"\r\n for p in points:\r\n sqlPoints += f' AND points REGEXP \".*{p}.*\"'\r\n if debug == 1: print(f'SELECT * FROM routers WHERE time={time}{sqlPoints}')\r\n with self.connection:\r\n return self.cursor.execute(f'SELECT * FROM routers WHERE time={time}{sqlPoints}').fetchall()\r\n\r\n def select_route_with_end(self, points=[], time=0, countPoints=0):\r\n \"\"\" Получаем все строки \"\"\"\r\n if 1 < int(countPoints) < 21:\r\n countPoints = f\"AND countPoints={countPoints} \"\r\n else:\r\n countPoints = \"\"\r\n if countPoints == \"\":\r\n if 0 < len(points) < 3:\r\n if len(points) == 2:\r\n sql = f\"\"\"SELECT countPoints, COUNT(countPoints)\r\n FROM routers WHERE time={time} AND points REGEXP \".*{points[0]}.*{points[1]}$\"\r\n GROUP BY countPoints\r\n \"\"\"\r\n else:\r\n sql = f\"\"\"SELECT countPoints, COUNT(countPoints)\r\n FROM routers WHERE time={time} AND points REGEXP \".*{points[0]}.*\"\r\n GROUP BY countPoints\r\n \"\"\"\r\n if debug == 1: print(sql)\r\n with self.connection:\r\n return self.cursor.execute(sql).fetchall()\r\n\r\n if 0 < len(points) < 3:\r\n if len(points) == 2:\r\n sql = f'SELECT * from routers WHERE time={time} {countPoints}AND points REGEXP \".*{points[0]}.*{points[1]}$\"'\r\n else:\r\n sql = f'SELECT * from routers WHERE time={time} {countPoints}AND points REGEXP \".*{points[0]}.*\"'\r\n if debug == 1: print(sql)\r\n with self.connection:\r\n return self.cursor.execute(sql).fetchall()\r\n\r\n else:\r\n return None\r\n\r\n\r\n def close(self):\r\n \"\"\" Закрываем текущее соединение с БД \"\"\"\r\n self.connection.close()\r\n\r\n","repo_name":"N0infDude/BAU_1409_MOSH","sub_path":"python/bdUtils.py","file_name":"bdUtils.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5722595119","text":"import re, sys\n\n\ndef select_cats(cats):\n return list(filter(lambda x: len(re.findall(r'cat', x)) >= 2, cats))\n\n\ndef test():\n sys.stdin = open('3.2.1.input.txt', 'r')\n cats = []\n\n try:\n while True:\n cats.append(input())\n except EOFError:\n assert select_cats(cats) == ['catcat', 'cat and cat']\n\n\ndef run():\n for line in sys.stdin:\n line = line.rstrip()\n if len(re.findall(r'cat', line)) >= 2:\n print(line)\n\n\nrun()","repo_name":"genakoganovich/PythonBacicsApplication","sub_path":"3.TextAnalysis/3.2.Regex/3.2.1.cat_2times.py","file_name":"3.2.1.cat_2times.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24405855617","text":"from pynput.keyboard import Key, Listener\nimport logging\n#Association du log au fichier log.txt\nlogging.basicConfig(filename=(\"log.txt\"), level=logging.DEBUG, format=\"%(asctime)s - %(message)s\")\n\n#Retourne la saisie dans le fichier log\ndef on_press(key):\n logging.info(str(key))\n\n#Mise en écoute\nwith Listener(on_press=on_press) as listener:\n listener.join()","repo_name":"Andtit4/Keylogger-python","sub_path":"keylogger.py","file_name":"keylogger.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26528214507","text":"from conta import Conta, Poupanca\n\n\nagencia = {\n 'contas 
correntes': [],\n 'poupanças': []\n}\n\nwhile True:\n titular = input('Digite o nome do(a) titular: ')\n if titular == 'stop':\n break\n numero = input('Digite o número da conta: ')\n saldo_inicial = float(input('Digite o saldo inicial: '))\n tipo = input('Digite o tipo da conta: ').lower()\n try:\n if tipo == 'conta':\n c = Conta(titular, numero, saldo_inicial)\n agencia['contas correntes'].append(c)\n elif tipo == 'poupanca':\n c = Poupanca(titular, numero, saldo_inicial)\n agencia['poupanças'].append(c)\n else:\n raise ValueError('Tipo de conta desconhecido')\n print()\n except ValueError:\n print('Usuário tentou cadastrar tipo de conta desconhecido')\n\nprint('Salvando arquivo')\n\nwith open('agencia.txt', 'w') as arquivo:\n for tipo_de_conta in agencia:\n arquivo.write(tipo_de_conta + '\\n')\n for conta in agencia[tipo_de_conta]:\n arquivo.write(str(conta))\n arquivo.write('\\n')\n arquivo.write('#########################################')\n arquivo.write('\\n')\n\nprint('Arquivo salvo')\n","repo_name":"conexaomundom/Aulas-de-Python-2021","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7812725115","text":"# Given the names and grades for each student in a class of students, store them in a nested list and print the name(s) of any student(s) having the second lowest grade.\n\nst = []\nscores = set()\nsecond_l_name = []\n\nif __name__ == '__main__':\n for _ in range(int(input())):\n name = input()\n score = float(input())\n st.append([name, score])\n scores.add(score)\n\n second_lowest = sorted(scores)[1]\n\n for name, score in st:\n if score == second_lowest:\n second_l_name.append(name)\n\n for name in sorted(second_l_name):\n print(name)\n\n","repo_name":"jfgamag/Python-HackerRank","sub_path":"Nested lists.py","file_name":"Nested lists.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7611674690","text":"\ndef _clojure_binary_impl(ctx):\n output_jar = ctx.outputs.jar\n build_output = output_jar.path + \".build_output\"\n\n clojure_compile = \"(set! 
*compile-path* \\\\\\\"%s\\\\\\\") (compile '%s)\" % (build_output, ctx.attr.main_class)\n\n cmd = \"rm -rf %s\\n\" % build_output\n cmd += \"mkdir -p %s\\n\" % build_output\n \n clj_paths = []\n\n for srcfile in ctx.files.srcs:\n dirpath = build_output + \"/\" + srcfile.dirname\n cmd += \"mkdir -p %s\\n\" % dirpath\n cmd += \"cp %s %s\\n\" % (srcfile.path, dirpath)\n \n cmd += \"export JAVA_HOME=external/local_jdk\\n\"\n cmd += \"export JAVA_CLASSPATH=%s\\n\" % \":\".join([jar.path for jar in ctx.files._clojure_jars] + [build_output])\n cmd += \"java -cp $JAVA_CLASSPATH clojure.main -e \\\"%s\\\"\\n\" % clojure_compile\n cmd += \"find %s -name '*.class' | sed 's:^%s/::' > %s/class_list\\n\" % (\n build_output,\n build_output,\n build_output,\n )\n cmd += \"root=`pwd`\\n\"\n cmd += \"cd %s; $root/%s Cc ../%s @class_list\\n\" % (\n build_output,\n ctx.executable._zipper.path,\n output_jar.basename,\n )\n cmd += \"cd $root\\n\"\n\n print(cmd)\n\n ctx.action(\n inputs = (\n ctx.files.srcs\n + ctx.files._clojure_jars\n + ctx.files._jdk\n + ctx.files._zipper\n ),\n outputs = [output_jar],\n mnemonic = \"Clojurec\",\n command = \"set -e;\" + cmd,\n use_default_shell_env = True,\n )\n\n_clojure_binary_attrs = {\n \"main_class\": attr.string(mandatory=True),\n \"srcs\": attr.label_list(allow_files = [\".clj\"]),\n \"_clojure_jars\": attr.label_list(default=[\n Label(\"@org_clojure//jar\"),\n Label(\"@org_clojure_spec_alpha//jar\"),\n Label(\"@org_clojure_core_specs_alpha//jar\"),\n ]),\n \"_jdk\": attr.label(\n default = Label(\"//tools/defaults:jdk\"),\n ),\n \"_zipper\": attr.label(\n default = Label(\"@bazel_tools//tools/zip:zipper\"),\n executable = True,\n single_file = True,\n cfg = \"host\",\n ),\n}\n\n_clojure_binary_outputs = {\n \"jar\": \"%{name}.jar\",\n}\n\nclojure_binary = rule(\n implementation = _clojure_binary_impl,\n attrs = _clojure_binary_attrs,\n outputs = _clojure_binary_outputs,\n # executable = True,\n fragments = [\"java\"],\n)\n","repo_name":"samuraisam/rules_clojure","sub_path":"clojure/clojure_binary.bzl","file_name":"clojure_binary.bzl","file_ext":"bzl","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"10728281702","text":"import asyncio\nimport os\nfrom unittest import TestCase, skip\n\nimport aioboto3\nfrom aiobotocore import AioSession\nfrom botocore import credentials\n\nfrom src.async_kinesis_client.kinesis_consumer import AsyncKinesisConsumer\n\n\"\"\"\nTest against live AWS. 
Use at your own risk.\n\"\"\"\n\n\nclass LiveTestKinesisConsumer(TestCase):\n\n def setUp(self):\n try:\n self.event_loop = asyncio.get_event_loop()\n except RuntimeError:\n self.event_loop = asyncio.new_event_loop()\n\n working_dir = os.path.join(os.path.expanduser('~'), '.aws/cli/cache')\n\n session = AioSession(profile=os.environ.get('AWS_PROFILE'))\n provider = session.get_component('credential_provider').get_provider('assume-role')\n provider.cache = credentials.JSONFileCache(working_dir)\n\n aioboto3.setup_default_session(botocore_session=session)\n\n @skip\n # Use at your own risk\n def testConsumer(self):\n\n async def test():\n\n async def read_records(shard_reader):\n print('Shard reader for shard {}'.format(shard_reader.shard_id))\n c = 0\n try:\n async for records in shard_reader.get_records():\n for r in records:\n c += 1\n print('Record: {}'.format(r))\n if c == 10:\n print('{}: millis: {}'.format(shard_reader.shard_id, shard_reader.millis_behind_latest))\n return\n except Exception as e:\n print('Reader exited: {}'.format(e))\n\n consumer = AsyncKinesisConsumer(\n stream_name=os.environ.get('AIOKINESIS_STREAM'),\n checkpoint_table=os.environ.get('AIOKINESIS_CHECKPOINT_TABLE'),\n host_key=os.environ.get('AIOKINESIS_HOST_KEY')\n )\n print('Configured consumer: {}'.format(consumer))\n\n async def interruptor(kinesis_consumer):\n await asyncio.sleep(60)\n print('Stopping consumer')\n kinesis_consumer.stop()\n self.stopped = True\n\n self.stopped = False\n asyncio.ensure_future(interruptor(consumer))\n\n while not self.stopped:\n async for reader in consumer.get_shard_readers():\n print('Got shard reader for shard id: {}'.format(reader.shard_id))\n asyncio.ensure_future(read_records(reader))\n print('Consumer stopped')\n\n print('Starting live test')\n self.event_loop.run_until_complete(test())\n","repo_name":"whale2/async-kinesis-client","sub_path":"tests/live_test.py","file_name":"live_test.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"8054634213","text":"from django.contrib.postgres.fields import ArrayField\nfrom django.db import models\nfrom django.utils import text\n\nfrom common.models import BaseModel\nfrom users.models import User\n\n\nclass Datalake(models.Model):\n class Meta:\n db_table = \"datalake\"\n managed = False\n\n id = models.UUIDField(primary_key=True)\n created_at = models.DateTimeField()\n logical_date = models.DateField()\n batch_id = models.TextField()\n src_alias = models.TextField()\n src_url = models.TextField()\n file = models.TextField()\n data = models.JSONField()\n data_normalized = models.JSONField()\n\n\nclass Datasource(BaseModel):\n # should match src_alias\n name = models.TextField(unique=True)\n slug = models.SlugField(blank=True, default=\"\")\n\n def save(self, *args, **kwargs):\n self.slug = text.slugify(self.name)\n super().save(*args, **kwargs)\n\n def __str__(self) -> str:\n return self.name\n\n\nclass Stream(BaseModel):\n datasource = models.ForeignKey(Datasource, on_delete=models.PROTECT)\n name = models.TextField()\n slug = models.SlugField(blank=True, default=\"\")\n selected_columns = ArrayField(models.TextField(), blank=True, default=list)\n\n def save(self, *args, **kwargs):\n self.slug = text.slugify(self.name)\n super().save(*args, **kwargs)\n\n def __str__(self) -> str:\n return f\"{self.datasource.name} | {self.name}\"\n\n\nclass Matching(BaseModel):\n left_stream = models.ForeignKey(Stream, on_delete=models.PROTECT, 
related_name=\"left_matchings\")\n right_stream = models.ForeignKey(Stream, on_delete=models.PROTECT, related_name=\"right_matchings\")\n\n logical_date = models.DateField()\n\n left_row_natural_id = models.TextField()\n left_row_data = models.JSONField()\n\n right_row_natural_id = models.TextField(null=True)\n right_row_data = models.JSONField(null=True)\n\n skipped = models.BooleanField(default=False)\n no_matching_row = models.BooleanField(default=False)\n\n created_by = models.ForeignKey(User, on_delete=models.PROTECT)\n","repo_name":"betagouv/data-inclusion","sub_path":"siretisation/django/matching/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"73218612745","text":"# Birthday problem\n# See https://math.stackexchange.com/questions/1804953/birthday-line-to-get-ticket-in-a-unique-setup for problem statement\n\n# Plotting for use later\nimport matplotlib.pyplot as plt\n\ndef birthday_line_problem():\n # prob to be updated as probability that person at line index i does not share a birthday with anyone in front of them\n # First person in line is guaranteed not to share birthday with anyone in front\n probs = [1]\n # q to be updated as prob that no one else shares a birthday with anyone in front of us except for us (we stand at positon i)\n # We want to maximize this probability\n # First person in line cannot share a birthday with anyone in front\n q = [0]\n # By pigeonhole principle, after 366 people, two are guaranteed to share a birthday (ignoring leap year)\n for i in range(1, 366):\n # q = 0, 1/365 * 365/365, 2/365 * 365/365 * 364/365, ...\n # q[i] = i/365 * probs[i-1]\n q.append(probs[i-1]*i/365)\n # probs = 365/365, 365/365 * 364/365, 365/365 * 364/365 * 363/365, ...\n # probs[i] = probs[i-1]*(365-n)/365\n probs.append(probs[i-1]*(365-i)/365)\n # Get index of max value of q, add 1 to compensate for 0 indexing\n # Also return q for plotting\n # This is where we want to stand!\n return q.index(max(q)) + 1, q\n\n\nif __name__ == \"__main__\":\n # Testing\n # Get best position and all probabilities\n best, q = birthday_line_problem()\n # Print best position\n print(\"The best position to stand in line is position {}\".format(best))\n # Plot probabilities; notice how the distribution peaks at position 20\n plt.plot(q)\n plt.show()\n","repo_name":"arcaputo3/algorithms","sub_path":"algos_and_data_structures/bday_line.py","file_name":"bday_line.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31157841694","text":"#!/usr/bin/python3\n\"\"\"This module defines class FileStorage.\"\"\"\n\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\nimport json\n\n\nclasses = {\n 'BaseModel': BaseModel,\n 'User': User,\n 'State': State,\n 'City': City,\n 'Amenity': Amenity,\n 'Place': Place,\n 'Review': Review\n }\n\n\nclass FileStorage():\n \"\"\"\n This class serializes instances to a JSON file and\n deserializes JSON file to instances.\n \"\"\"\n __file_path = 'file.json'\n __objects = {}\n\n def all(self):\n \"\"\" Returns the dictionary __objects.\"\"\"\n\n return self.__objects\n\n def new(self, obj):\n \"\"\"Sets in __objects the obj with key .id\"\"\"\n \n if obj is not 
None:\n            key = obj.__class__.__name__ + \".\" + obj.id\n            self.__objects[key] = obj\n\n    def save(self):\n        \"\"\" Serializes __objects to the JSON file (path: __file_path)\"\"\"\n\n        json_dict = {}\n\n        for key, value in self.__objects.items():\n            json_dict[key] = value.to_dict()\n\n        with open(self.__file_path, 'w') as f:\n            json.dump(json_dict, f)\n\n    def reload(self):\n        \"\"\"\n        deserializes the JSON file to __objects (only if the JSON\n        file (__file_path) exists ; otherwise does nothing if the\n        file doesn't exist.\n        \"\"\"\n\n        try:\n            with open(self.__file_path, 'r') as f:\n                pd = json.load(f)\n                for key in pd:\n                    self.__objects[key] = classes[pd[key][\"__class__\"]](**pd[key])\n        except FileNotFoundError:\n            pass\n","repo_name":"Chiemelie10/AirBnB_Practice","sub_path":"models/engine/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25246757765","text":"from pylab import *\r\nfrom scipy import *\r\nimport scipy.sparse.linalg as sla\r\nimport scipy.sparse as sparse\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom matplotlib import cm\r\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\r\nimport time\r\n\r\n\r\n#######################################\r\n# Specify h and lambda => Calculate k #\r\n#######################################\r\n\r\nh = 1/20.\r\nmu = 20\r\nk = h**2*mu\r\n\r\n########################\r\n# Develop (x,y,t) grid #\r\n########################\r\n\r\n# grid sizes must be integers to build arrays and index columns\r\nx_dim = int(round(1/h))\r\ny_dim = x_dim\r\nt_dim = int(round(1/k))\r\n\r\nx = np.linspace(0,1,x_dim)\r\ny = np.linspace(0,1,y_dim)\r\nt = np.linspace(0,0.8,t_dim)\r\n\r\n################################################\r\n# Specify initial data and boundary conditions #\r\n################################################\r\n\r\nic = mat(zeros((x_dim,y_dim)))\r\n\r\nfor i in range (0,len(x)):\r\n    for j in range (0,len(y)):\r\n        if (x[i]-0.5)**2+(y[j]-0.5)**2 >= 1/16. 
and (x[i]-0.5)**2+(y[j]-0.5)**2 <= 1/8.:\r\n            ic[i,j] = 5\r\n\r\n\r\n##########################\r\n# Plot initial condition #\r\n##########################\r\n\r\nfig = plt.figure(1)\r\naxis = fig.gca(projection='3d')\r\nX, Y = np.meshgrid(x, y)\r\nsurf = axis.plot_surface(X, Y, ic, rstride=1, cstride=1, cmap=cm.winter,\r\n                       linewidth=0, antialiased=False)\r\nplt.show()\r\n\r\n\r\n###########################################\r\n# Construct diagonal matrix for inversion #\r\n###########################################\r\n\r\nonez = np.ones((x_dim*y_dim,))\r\nA = sparse.spdiags(np.vstack((-mu*onez,-mu*onez/2,(1+3*mu)*onez,-mu*onez/2,-mu*onez)), np.array([-x_dim,-1,0,1,x_dim]),x_dim*y_dim,x_dim*y_dim).todense()\r\nB = sparse.spdiags(np.vstack((mu*onez,mu*onez/2,(1-3*mu)*onez,mu*onez/2,mu*onez)), np.array([-x_dim,-1,0,1,x_dim]),x_dim*y_dim,x_dim*y_dim).todense()\r\n\r\n\r\n##############################################\r\n# Establish boundary conditions for each edge #\r\n##############################################\r\n'''\r\n(a,b) correspond to (x,y) when establishing the boundary conditions\r\nThe first two loops establish the Neumann BC's\r\nThe second two loops establish the Dirichlet BC's\r\n'''\r\n\r\nfor a in range(0,len(x)):\r\n    A[a,a+x_dim] = A[a,a+x_dim] - mu\r\n    B[a,a+x_dim] = B[a,a+x_dim] + mu\r\nfor b in range(0,len(y)):\r\n    if not (b == 0):\r\n        A[b*y_dim,b*y_dim + 1] = A[b*y_dim,b*y_dim + 1] - mu/2\r\n        A[b*y_dim,b*y_dim - 1] = 0\r\n        B[b*y_dim,b*y_dim + 1] = B[b*y_dim,b*y_dim + 1] + mu/2\r\n        B[b*y_dim,b*y_dim - 1] = 0\r\n    else:\r\n        A[b*y_dim,b*y_dim + 1] = A[b*y_dim,b*y_dim + 1] - mu/2\r\n        B[b*y_dim,b*y_dim + 1] = B[b*y_dim,b*y_dim + 1] + mu/2\r\nfor a in range(0,len(x)):\r\n    A[(y_dim-1)*x_dim + a,(y_dim - 1)*x_dim + a] = 10**20\r\n    B[(y_dim-1)*x_dim + a,(y_dim - 1)*x_dim + a] = 10**20\r\nfor b in range(0,len(y)):\r\n    A[(b+1)*x_dim - 1,(b+1)*x_dim - 1] = 10**20\r\n    B[(b+1)*x_dim - 1,(b+1)*x_dim - 1] = 10**20\r\n\r\n\r\nu = mat(zeros((x_dim*y_dim,t_dim)))\r\nu[:,0] = ic.reshape(x_dim*y_dim,1)\r\nfor i in range (1,len(t)):\r\n    u[:,i] = sparse.linalg.spsolve(A,B*u[:,i-1]).reshape(x_dim*y_dim,1)\r\n\r\n\r\n\r\n\r\n#######################################\r\n# Plot results for various time-steps #\r\n#######################################\r\n\r\nfig = plt.figure(2)\r\naxis = fig.gca(projection='3d')\r\nX, Y = np.meshgrid(x, y)\r\nsurf = axis.plot_surface(X, Y, u[:,t_dim//4].reshape(x_dim,y_dim) , rstride=1, cstride=1, cmap=cm.winter, linewidth=0, antialiased=False)\r\nplt.show()\r\n\r\nfig = plt.figure(3)\r\naxis = fig.gca(projection='3d')\r\nX, Y = np.meshgrid(x, y)\r\nsurf = axis.plot_surface(X, Y, u[:,t_dim//2].reshape(x_dim,y_dim) , rstride=1, cstride=1, cmap=cm.winter, linewidth=0, antialiased=False)\r\nplt.show()\r\n\r\nfig = plt.figure(4)\r\naxis = fig.gca(projection='3d')\r\nX, Y = np.meshgrid(x, y)\r\nsurf = axis.plot_surface(X, Y, u[:,3*t_dim//4].reshape(x_dim,y_dim) , rstride=1, cstride=1, cmap=cm.winter, linewidth=0, antialiased=False)\r\nplt.show()\r\n\r\nfig = plt.figure(5)\r\naxis = fig.gca(projection='3d')\r\nX, Y = np.meshgrid(x, y)\r\nsurf = axis.plot_surface(X, Y, u[:,t_dim-1].reshape(x_dim,y_dim) , rstride=1, cstride=1, cmap=cm.winter, linewidth=0, antialiased=False)\r\nplt.show()\r\n\r\n \r\n","repo_name":"czhang49/CodeRepo","sub_path":"Numerical Methods to PDEs/HW2/02_04.py","file_name":"02_04.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"686635357","text":"import 
arcade\nfrom random import *\n\nscreen_width = 640\nscreen_height = 640\nscreen_title = \"Journey of the Prairie King\"\nsprite_scaling = 1\n\n\nclass Character:\n def __init__(self, pos_x, pos_y, number_of_hearts, speed):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.speed = speed\n self.number_of_hearts = number_of_hearts\n self.dead = False\n\n def get_number_of_hearts(self):\n \"\"\"\n Devuelve el numero de corazones o \"vidas\" del personaje\n None -> int\n \"\"\"\n return self.number_of_hearts\n\n def sub_heart(self, number_of_hearts): # los hits que aguantan los enemigos son como sus \"vidas\"\n \"\"\"\n Quita un corazon del total de corazones\n Int -> None\n \"\"\"\n self.number_of_hearts -= 1\n if self.number_of_hearts <= 0:\n self.dead = True\n\n\nclass MainCharacter(Character):\n def __init__(self, pos_x, pos_y, number_of_hearts, speed): # muere de un golpe\n\n super().__init__(pos_x, pos_y, number_of_hearts, speed)\n\n self.money = 0\n self.weapon = Weapon # not sure how to do this\n self.right = False\n self.left = False\n self.up = False\n self.down = False\n\n def get_money(self):\n \"\"\"\n Retorna el dinero del personaje\n \"\"\"\n return self.money\n\n def get_speed(self):\n \"\"\"\n Retorna la velocidad del personaje\n \"\"\"\n return self.speed\n\n def set_speed(self, new_speed):\n \"\"\"\n Cambia la velocidad del personaje\n \"\"\"\n self.speed = new_speed\n\n def add_heart(self, number_of_hearts):\n \"\"\"\n Suma un corazon al total de corazones\n Int -> None\n \"\"\"\n self.number_of_hearts += 1\n\n\nclass Enemy(Character):\n def __init__(self, pos_x, pos_y, number_of_hearts, speed):\n super().__init__(pos_x, pos_y, number_of_hearts, speed)\n\n self.drop_list = [\"\",\"\",\"\"] # items, todavía por definir, lista de objetos (clase)\n\n def drop(self):\n\n \"\"\"\n Elige un item al azar del drop list y lo retorna\n None -> String\n \"\"\"\n\n return self.drop_list[randint(0, len(self.drop_list) - 1)] # no tengo en cuenta droprate\n\n\nclass Item:\n def __init__(self, price):\n self.price = price\n\n\nclass Weapon(Item):\n def __init__(self, name, price, dmg, weapon_type):\n super().__init__(price)\n self.dmg = dmg\n self.name = name\n self.powerUp_list = [\"Caja de vacunas\", \"Lejía\", \"Aguja nueva\"]\n self.weapon_type = weapon_type\n\n def has_power_up(self):\n \"\"\"\n Checkea si un arma tiene powerUp\n String -> boolean\n \"\"\"\n pass\n\n def apply_power_up(self, power_up):\n \"\"\"\n Aplica powerUp a arma\n String -> None\n \"\"\"\n\n\nclass Consumable(Item):\n def __init__(self, name):\n super().__init__(name)\n\n def apply_consumable(self, consumable):\n \"\"\"\n Aplica objeto consumible\n String -> None\n \"\"\"\n pass\n\n\nclass Bullet(arcade.Sprite):\n def update(self):\n self.center_y += self.change_y\n self.center_x += self.change_x\n\n\nclass Game(arcade.Window):\n def __init__(self):\n super().__init__(screen_width, screen_height, screen_title)\n\n # lists\n self.player_list = None\n self.bullet_list = None\n self.enemy_list = None\n\n # main character, bullet and enemy\n self.player = None\n self.bullet_shoot = None\n self.enemy = None\n\n # character, bullet and enemy sprites\n self.player_sprite = None\n self.bullet_sprite = None\n self.enemy_sprite = None\n\n # map sprites\n self.paredes = None\n self.suelo = None\n self.cosas = None\n self.obstaculos = None\n self.obstaculos_2 = None\n self.perfeccionar = None\n self.cuerpos = None\n self.sangre = None\n\n # number of the room the player is\n self.current_room = 0\n\n # physics between 
enemies\n self.physics_engine_enemy = None\n self.physics_engine_enemy1 = None\n self.physics_engine_enemy2 = None\n\n # collision bullet - enemy\n self.collision_bullet_enemy = None\n\n def setup(self):\n \"\"\"\n Set up the game and initialize the variables. Call this function to restart the game\n \"\"\"\n # Set up the lists\n self.player_list = arcade.SpriteList()\n self.bullet_list = arcade.SpriteList()\n self.enemy_list = arcade.SpriteList()\n self.physics_engine_enemy = arcade.SpriteList()\n self.physics_engine_enemy1 = arcade.SpriteList()\n self.physics_engine_enemy2 = arcade.SpriteList()\n\n # The entrances is the first room\n self.entrance()\n\n # Set up the player\n self.player = MainCharacter(screen_width / 2, 305, 3, 360)\n self.player_sprite = arcade.Sprite(\n \"mapas/personajes/protagonista.png\",\n sprite_scaling, center_x=self.player.pos_x, center_y=self.player.pos_y)\n\n self.player_list.append(self.player_sprite)\n\n # Rooms created\n def entrance(self):\n my_map = arcade.tilemap.read_tmx(\"mapas/archivos tsx/entrada.tmx\")\n\n self.paredes = arcade.tilemap.process_layer(my_map, \"paredes\", 1)\n self.suelo = arcade.tilemap.process_layer(my_map, \"suelo \", 1)\n self.cosas = arcade.tilemap.process_layer(my_map, \"cosas\", 1)\n self.obstaculos = arcade.tilemap.process_layer(my_map, \"obstaculos\", 1)\n\n # Enemies\n # Enemies 2\n self.enemy = Enemy(640, 305, 2, 3)\n self.enemy_sprite = arcade.Sprite(\"mapas/personajes/enemigo 2.png\", 1, center_x=self.enemy.pos_x, center_y=self.enemy.pos_y)\n self.enemy_sprite.speed = self.enemy.speed\n self.enemy_sprite.number_of_hearts = self.enemy.number_of_hearts\n self.enemy_list.append(self.enemy_sprite)\n self.physics_engine_enemy=(arcade.PhysicsEngineSimple(self.enemy_sprite, self.enemy_list))\n\n # Enemies 3\n self.enemy = Enemy(0, 500, 4, 1)\n self.enemy_sprite = arcade.Sprite(\"mapas/personajes/enemigo 3.png\", 1, center_x=self.enemy.pos_x, center_y=self.enemy.pos_y)\n self.enemy_sprite.speed = self.enemy.speed\n self.enemy_sprite.number_of_hearts = self.enemy.number_of_hearts\n self.enemy_list.append(self.enemy_sprite)\n self.physics_engine_enemy1 =(arcade.PhysicsEngineSimple(self.enemy_sprite, self.enemy_list))\n\n # Enemies 1\n self.enemy = Enemy(0, 305, 2, 2)\n self.enemy_sprite = arcade.Sprite(\"mapas/personajes/enemigo 1.png\", 1, center_x=self.enemy.pos_x, center_y=self.enemy.pos_y)\n self.enemy_sprite.speed = self.enemy.speed\n self.enemy_sprite.number_of_hearts = self.enemy.number_of_hearts\n self.enemy_list.append(self.enemy_sprite)\n self.physics_engine_enemy2 = (arcade.PhysicsEngineSimple(self.enemy_sprite, self.enemy_list))\n\n def room_1(self):\n\n my_map = arcade.tilemap.read_tmx(\"mapas/archivos tsx/planta 1.tmx\")\n\n self.paredes = arcade.tilemap.process_layer(my_map, \"paredes\", 1)\n self.suelo = arcade.tilemap.process_layer(my_map, \"suelo \", 1)\n self.obstaculos_2 = arcade.tilemap.process_layer(my_map, \"obstaculos 2\", 1)\n self.obstaculos = arcade.tilemap.process_layer(my_map, \"obstaculos\", 1)\n self.perfeccionar = arcade.tilemap.process_layer(my_map, \"perfeccionar\", 1)\n\n def room_2(self):\n my_map = arcade.tilemap.read_tmx(\"mapas/archivos tsx/planta 2.tmx\")\n\n self.paredes = arcade.tilemap.process_layer(my_map, \"paredes\", 1)\n self.suelo = arcade.tilemap.process_layer(my_map, \"suelo \", 1)\n self.obstaculos_2 = arcade.tilemap.process_layer(my_map, \"obstaculos2\", 1)\n self.obstaculos = arcade.tilemap.process_layer(my_map, \"obstaculos\", 1)\n self.perfeccionar = 
arcade.tilemap.process_layer(my_map, \"perfeccionar\", 1)\n self.cuerpos = arcade.tilemap.process_layer(my_map, \"cuerpos\", 1)\n self.sangre = arcade.tilemap.process_layer(my_map, \"sangre\", 1)\n\n def room_3(self):\n my_map = arcade.tilemap.read_tmx(\"mapas/archivos tsx/planta 3.tmx\")\n\n self.paredes = arcade.tilemap.process_layer(my_map, \"paredes\", 1)\n self.suelo = arcade.tilemap.process_layer(my_map, \"suelo \", 1)\n self.sangre = arcade.tilemap.process_layer(my_map, \"sangre\", 1)\n self.obstaculos = arcade.tilemap.process_layer(my_map, \"obstaculos\", 1)\n self.cuerpos = arcade.tilemap.process_layer(my_map, \"cuerpos\", 1)\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\" # collisions go here\n\n # Update of the room where the player is\n # Going up stairs\n\n if self.player_sprite.center_y > 630 and 326 < self.player_sprite.center_x < 376 and self.current_room == 0:\n self.current_room = 1\n self.room_1()\n self.player_sprite.center_y = 620\n self.player_sprite.center_x = 416\n\n if self.player_sprite.center_y > 635 and 298 < self.player_sprite.center_x < 343 and self.current_room == 1:\n self.current_room = 2\n self.room_2()\n self.player_sprite.center_y = 30\n self.player_sprite.center_x = 288\n\n if self.player_sprite.center_y > 635 and 256 < self.player_sprite.center_x < 320 and self.current_room == 2:\n self.current_room = 3\n self.room_3()\n self.player_sprite.center_y = 30\n self.player_sprite.center_x = 256\n\n # Going down stairs\n if self.player_sprite.center_y < 10 and 224 < self.player_sprite.center_x < 288 and self.current_room == 3:\n self.current_room = 2\n self.room_2()\n self.player_sprite.center_y = 620\n self.player_sprite.center_x = 288\n\n if self.player_sprite.center_y < 10 and 256 < self.player_sprite.center_x < 320 and self.current_room == 2:\n self.current_room = 1\n self.room_1()\n self.player_sprite.center_y = 620\n self.player_sprite.center_x = 320\n\n if self.player_sprite.center_y > 635 and 389 < self.player_sprite.center_x < 438 and self.current_room == 1:\n self.current_room = 0\n self.entrance()\n self.player_sprite.center_y = 620\n self.player_sprite.center_x = 352\n\n # main character movement\n if self.player.right:\n self.player_sprite.center_x += self.player.speed * delta_time\n if self.player.left:\n self.player_sprite.center_x -= self.player.speed * delta_time\n if self.player.up:\n self.player_sprite.center_y += self.player.speed * delta_time\n if self.player.down:\n self.player_sprite.center_y -= self.player.speed * delta_time\n\n # collisions with screen borders (not finished)\n if self.player_sprite.center_x + 27 >= screen_width:\n self.player_sprite.center_x = screen_width - 27\n if self.player_sprite.center_x <= 0:\n self.player_sprite.center_x = 0\n if self.player_sprite.center_y >= screen_height:\n self.player_sprite.center_y = screen_height\n if self.player_sprite.center_y <= 0:\n self.player_sprite.center_y = 0\n\n # make the bullet shoot\n if self.bullet_shoot:\n self.shoot()\n\n # collision of the bullet with enemy\n for self.bullet_sprite in self.bullet_list:\n collision_bullet_enemy = arcade.check_for_collision_with_list(self.bullet_sprite, self.enemy_list)\n if len(collision_bullet_enemy) > 0:\n self.bullet_sprite.remove_from_sprite_lists()\n\n # enemy actualization of hearts\n for self.enemy_sprite in collision_bullet_enemy:\n if self.enemy_sprite.number_of_hearts > 0:\n self.enemy_sprite.number_of_hearts -= 1\n if self.enemy_sprite.number_of_hearts == 0:\n 
self.enemy_sprite.remove_from_sprite_lists()\n\n # List to make the enemy goes to the coordenates of the player\n for self.enemy_sprite in self.enemy_list:\n self.movimiento(self.enemy_sprite, self.player_sprite)\n\n # update player position\n self.physics_engine_enemy.update()\n self.physics_engine_enemy1.update()\n self.physics_engine_enemy2.update()\n self.player_list.update()\n self.bullet_list.update()\n\n def shoot(self):\n\n # Bullet creation\n bullet = Bullet(\"bullet.png\", 0.5)\n bullet.center_x = self.player_sprite.center_x\n bullet.center_y = self.player_sprite.center_y\n self.bullet_list.append(bullet)\n bullet.speed = 8\n\n # Direction of the bullet\n if self.player.right:\n bullet.change_x = bullet.speed\n bullet.change_y = 0\n bullet.angle = 0\n if self.player.down:\n bullet.change_x = 0\n bullet.change_y = -bullet.speed\n bullet.angle = 270\n if self.player.left:\n bullet.change_x = -bullet.speed\n bullet.change_y = 0\n bullet.angle = 180\n if self.player.up:\n bullet.change_x = 0\n bullet.change_y = bullet.speed\n bullet.angle = 90\n if self.player.down and self.player.left:\n bullet.change_x = -bullet.speed / 2\n bullet.change_y = -bullet.speed / 2\n bullet.angle = 225\n if self.player.down and self.player.right:\n bullet.change_x = bullet.speed / 2\n bullet.change_y = -bullet.speed / 2\n bullet.angle = 315\n if self.player.up and self.player.left:\n bullet.change_x = -bullet.speed / 2\n bullet.change_y = bullet.speed / 2\n bullet.angle = 135\n if self.player.up and self.player.right:\n bullet.change_x = bullet.speed / 2\n bullet.change_y = bullet.speed / 2\n bullet.angle = 45\n if not self.player.up and not self.player.down and not self.player.right and not self.player.left:\n bullet.change_x = 0\n bullet.change_y = bullet.speed\n bullet.angle = 90\n\n def movimiento(self, enemy, player):\n\n # Movement\n if enemy.center_x < player.center_x:\n enemy.center_x += enemy.speed\n if enemy.center_x > player.center_x:\n enemy.center_x -= enemy.speed\n if enemy.center_y < player.center_y:\n enemy.center_y += enemy.speed\n if enemy.center_y > player.center_y:\n enemy.center_y -= enemy.speed\n\n def on_draw(self):\n \"\"\"\n Render the screen\n \"\"\"\n arcade.start_render()\n\n # draw background\n # arcade.draw_lrwh_rectangle_textured(0, 0, screen_width, screen_height, self.background_image)\n\n # draw of the map\n # Room entrance\n if self.current_room == 0:\n self.paredes.draw()\n self.suelo.draw()\n self.cosas.draw()\n self.obstaculos.draw()\n\n # Room 1\n if self.current_room == 1:\n self.paredes.draw()\n self.suelo.draw()\n self.obstaculos.draw()\n self.obstaculos_2.draw()\n self.perfeccionar.draw()\n\n # Room 2\n if self.current_room == 2:\n self.paredes.draw()\n self.suelo.draw()\n self.obstaculos.draw()\n self.obstaculos_2.draw()\n self.perfeccionar.draw()\n self.sangre.draw()\n self.cuerpos.draw()\n\n # Room 3\n if self.current_room == 3:\n self.paredes.draw()\n self.suelo.draw()\n self.obstaculos.draw()\n self.sangre.draw()\n self.cuerpos.draw()\n\n # draw all sprites\n self.player_list.draw()\n self.bullet_list.draw()\n self.enemy_list.draw()\n\n def on_key_press(self, key, modifiers):\n\n # move character\n if key == arcade.key.W:\n self.player.up = True\n if key == arcade.key.A:\n self.player.left = True\n if key == arcade.key.S:\n self.player.down = True\n if key == arcade.key.D:\n self.player.right = True\n\n # to shoot\n if key == arcade.key.SPACE:\n self.shoot()\n\n def on_key_release(self, key, modifiers):\n if key == arcade.key.W:\n self.player.up = False\n if 
key == arcade.key.A:\n self.player.left = False\n if key == arcade.key.S:\n self.player.down = False\n if key == arcade.key.D:\n self.player.right = False\n\n\ndef main():\n game = Game()\n game.setup()\n arcade.run()\n\n\nmain()\n","repo_name":"TecnologiaVideojuegos/proyectovideojuego-equipo-d","sub_path":"vired/tests/Pruebas desarrollo.py","file_name":"Pruebas desarrollo.py","file_ext":"py","file_size_in_byte":16769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70234294666","text":"import sys, os\nimport logging\nimport numpy as np\nimport colorsys\n\n\n# Current path\ncur_path = os.path.dirname(os.path.realpath(os.path.basename(__file__)))\n\n# Logging\nlogger = logging.getLogger(\"SRL Bench\")\nlogger.setLevel(logging.DEBUG)\nlogger.propagate = False\n\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter('%(asctime)s\\t(%(name)s)\\t[%(levelname)s]\\t%(message)s')\n\nch.setFormatter(formatter)\n\nlogger.addHandler(ch)\n\n\n# Normal random tensor generation\ndef randn(*args): return np.random.randn(*args).astype('f')\n\nclass Batch_Loader(object):\n def __init__(self, train_triples, words_indexes, indexes_words, headTailSelector, \\\n entity2id, id2entity, relation2id, id2relation, batch_size=100, neg_ratio=1.0):\n self.train_triples = train_triples\n self.indexes = np.array(list(self.train_triples.keys())).astype(np.int32)\n self.values = np.array(list(self.train_triples.values())).astype(np.float32)\n self.batch_size = batch_size\n self.words_indexes = words_indexes\n self.indexes_words = indexes_words # heads, relations, tails are also considered as words\n self.n_words = len(self.indexes_words)\n self.neg_ratio = int(neg_ratio)\n self.headTailSelector = headTailSelector\n self.relation2id = relation2id\n self.id2relation = id2relation\n self.entity2id = entity2id\n self.id2entity = id2entity\n\n self.indexes_rels = {}\n self.indexes_ents = {}\n for _word in self.words_indexes:\n index = self.words_indexes[_word]\n if _word in self.relation2id:\n self.indexes_rels[index] = _word\n elif _word in self.entity2id:\n self.indexes_ents[index] = _word\n\n self.new_triples_indexes = np.empty((self.batch_size * (self.neg_ratio + 1), 3)).astype(np.int32)\n self.new_triples_values = np.empty((self.batch_size * (self.neg_ratio + 1), 1)).astype(np.float32)\n\n def __call__(self):\n\n idxs = np.random.randint(0, len(self.values), self.batch_size)\n self.new_triples_indexes[:self.batch_size, :] = self.indexes[idxs, :]\n self.new_triples_values[:self.batch_size] = self.values[idxs, :]\n\n last_idx = self.batch_size\n\n if self.neg_ratio > 0:\n\n # Pre-sample everything, faster\n rdm_words = np.random.randint(0, self.n_words, last_idx * self.neg_ratio)\n # Pre copying everyting\n self.new_triples_indexes[last_idx:(last_idx * (self.neg_ratio + 1)), :] = np.tile(\n self.new_triples_indexes[:last_idx, :], (self.neg_ratio, 1))\n self.new_triples_values[last_idx:(last_idx * (self.neg_ratio + 1))] = np.tile(\n self.new_triples_values[:last_idx], (self.neg_ratio, 1))\n\n for i in range(last_idx):\n for j in range(self.neg_ratio):\n cur_idx = i * self.neg_ratio + j\n tmpRel = self.indexes_words[self.new_triples_indexes[last_idx + cur_idx, 1]]\n tmpIndexRel = self.relation2id[tmpRel]\n pr = self.headTailSelector[tmpIndexRel]\n\n # Sample a random subject or object\n if (np.random.randint(np.iinfo(np.int32).max) % 1000) > pr:\n while (rdm_words[cur_idx] in self.indexes_rels or (\n rdm_words[cur_idx], 
self.new_triples_indexes[last_idx + cur_idx, 1],\n self.new_triples_indexes[last_idx + cur_idx, 2]) in self.train_triples):\n rdm_words[cur_idx] = np.random.randint(0, self.n_words)\n self.new_triples_indexes[last_idx + cur_idx, 0] = rdm_words[cur_idx]\n else:\n while (rdm_words[cur_idx] in self.indexes_rels or (\n self.new_triples_indexes[last_idx + cur_idx, 0],\n self.new_triples_indexes[last_idx + cur_idx, 1],\n rdm_words[cur_idx]) in self.train_triples):\n rdm_words[cur_idx] = np.random.randint(0, self.n_words)\n self.new_triples_indexes[last_idx + cur_idx, 2] = rdm_words[cur_idx]\n\n self.new_triples_values[last_idx + cur_idx] = [-1]\n\n last_idx += cur_idx + 1\n\n return self.new_triples_indexes[:last_idx, :], self.new_triples_values[:last_idx]\n\n","repo_name":"daiquocnguyen/ConvKB","sub_path":"ConvKB_tf/batching.py","file_name":"batching.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"81"} +{"seq_id":"23127083417","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport pytorch_lightning as pl\nimport torchaudio\n# import torchmetrics\nfrom torchmetrics import Accuracy, F1Score, ConfusionMatrix\nfrom transformers import AutoModel\nimport numpy as np\nfrom abc import abstractmethod\n# from torchsummary import summary\n\n\nclass BaseModel(pl.LightningModule):\n \"\"\"\n Base class for all models\n \"\"\"\n\n @abstractmethod\n def forward(self, *inputs):\n \"\"\"\n Forward pass logic\n\n :return: Model output\n \"\"\"\n raise NotImplementedError\n\n def __str__(self):\n \"\"\"\n Model # prints with number of trainable parameters\n \"\"\"\n model_parameters = filter(lambda p: p.requires_grad, self.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n\n return super().__str__() + '\\nTrainable parameters: {}'.format(params)\n\n def get_feature(self, z):\n _, feature = self.forward(z)\n if type(feature) == tuple:\n feature = feature[0]\n return feature\n\n def predict(self, x):\n \"\"\"\n\n :param x: input tensor (torch.Tensor)\n :return: single output of model (numpy.array)\n \"\"\"\n\n self.eval()\n out, _ = self.forward(x)\n out = torch.argmax(out, dim=1)\n out = out.cpu().detach().numpy().copy()\n # out = np.squeeze(out)\n return out\n\n def predict_proba(self, x):\n \"\"\"\n\n :param x: input tensor (torch.Tensor)\n :return: single output of model (numpy.array)\n \"\"\"\n self.eval()\n out, _ = self.forward(x)\n out = F.softmax(out, dim=1) # assuming logits has the shape [batch_size, nb_classes]\n out = out.cpu().detach().numpy().copy()\n out = np.squeeze(out)\n return out\n\nclass CRNN(pl.LightningModule):\n \"\"\"\n Baseline model \n borrowed from \n https://github.com/bill317996/Singer-identification-in-artist20/blob/master\n \"\"\"\n def __init__(self, conf, classes_num):\n super().__init__()\n self.lr = conf.lr\n self.num_classes = classes_num\n self.elu = nn.ELU()\n self.softmax = nn.Softmax(dim=1)\n\n self.audio = torchaudio.transforms.MelSpectrogram(sample_rate=conf.sr,\n n_mels=128,\n n_fft=2048,\n hop_length=512)\n self.amplitude_to_db = torchaudio.transforms.AmplitudeToDB(top_db=80)\n\n self.Conv1 = nn.Conv2d(1, 64, (3,3))\n self.Bn1 = nn.BatchNorm2d(64)\n self.mp1 = nn.MaxPool2d((2,2), stride=(2,2))\n self.drop1 = nn.Dropout2d(p=0.1)\n\n self.Conv2 = nn.Conv2d(64, 128, (3,3))\n self.Bn2 = nn.BatchNorm2d(128)\n self.mp2 = nn.MaxPool2d((4,2), stride=(2,4))\n self.drop2 = nn.Dropout2d(p=0.1)\n\n self.Conv3 = nn.Conv2d(128, 128, 
(3,3))\n self.Bn3 = nn.BatchNorm2d(128)\n self.mp3 = nn.MaxPool2d((4,2), stride=(4,4))\n self.drop3 = nn.Dropout2d(p=0.1)\n\n self.Conv4 = nn.Conv2d(128, 128, (3,3))\n self.Bn4 = nn.BatchNorm2d(128)\n self.mp4 = nn.MaxPool2d((4,2), stride=(4,4))\n self.drop4 = nn.Dropout2d(p=0.1)\n\n self.gru1 = nn.GRU(128, 32, num_layers=1, batch_first=True)\n self.gru2 = nn.GRU(32, 32, num_layers=1, batch_first=True)\n self.drop5 = nn.Dropout(p=0.3)\n\n self.linear1 = nn.Linear(32, classes_num)\n\n self.train_acc = Accuracy(num_classes=self.num_classes, average='macro', task='multiclass')\n self.val_acc = Accuracy(num_classes=self.num_classes, average='macro', task='multiclass')\n self.test_acc = Accuracy(num_classes=self.num_classes, average='macro', task='multiclass')\n self.test_top2 = Accuracy(num_classes=self.num_classes, average='macro', top_k=2, task='multiclass')\n self.test_top3 = Accuracy(num_classes=self.num_classes, average='macro', top_k=3, task='multiclass')\n self.test_f1 = F1Score(num_classes=self.num_classes, average='macro', task='multiclass')\n self.confusion = ConfusionMatrix(num_classes=self.num_classes, task='multiclass')\n\n def forward(self, x):\n x = self.audio(x)\n x = self.amplitude_to_db(x)\n # x = torch.permute(x, (0,1,3,2))\n\n x = self.drop1(self.mp1(self.Bn1(self.elu(self.Conv1(x)))))\n\n x = self.drop2(self.mp2(self.Bn2(self.elu(self.Conv2(x)))))\n\n x = self.drop3(self.mp3(self.Bn3(self.elu(self.Conv3(x)))))\n\n x = self.drop4(self.mp4(self.Bn4(self.elu(self.Conv4(x)))))\n\n x = x.transpose(1, 3)\n x = torch.reshape(x, (x.size(0),x.size(1),-1))\n\n x, _ = self.gru1(x)\n x, _ = self.gru2(x)\n x = self.drop5(x)\n\n x = torch.reshape(x, (x.size(0), -1))\n emb = x\n x = self.linear1(x)\n return x, emb\n\n def training_step(self, train_batch, batch_idx):\n x, y = train_batch\n # print(x.shape)\n out,_ = self(x)\n loss = F.cross_entropy(out, y)\n self.log('train_loss', loss, on_epoch=True, on_step=False)\n self.log('train_acc', self.train_acc(out, y), on_step=False, on_epoch=True)\n return loss\n\n def validation_step(self, val_batch, batch_idx):\n x, y = val_batch\n out,_ = self(x)\n loss = F.cross_entropy(out, y)\n self.log('val_loss', loss, on_epoch=True, on_step=False)\n self.log('val_acc', self.val_acc(out, y), on_step=False, on_epoch=True)\n return loss\n\n def test_step(self, test_batch, batch_idx):\n x, y = test_batch\n out,_ = self(x)\n self.log('test_accuracy', self.test_acc(out,y), on_epoch=True, on_step=False)\n self.log('test_f1', self.test_f1(out, y), on_epoch=True, on_step=False)\n self.log('test_top2_accuracy', self.test_top2(out, y), on_epoch=True, on_step=False)\n self.log('test_top3_accuracy', self.test_top3(out, y), on_epoch=True, on_step=False)\n self.log('test_confusion', self.confusion(out, y), on_epoch=False, on_step=False)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n return optimizer\n\n def predict(self, x):\n self.eval()\n out, _ = self.forward(x)\n out = torch.argmax(out, dim=1)\n out = out.cpu().detach().numpy().copy()\n # out = np.squeeze(out)\n return out\n\n def predict_proba(self, x):\n \"\"\"\n\n :param x: input tensor (torch.Tensor)\n :return: single output of model (numpy.array)\n \"\"\"\n self.eval()\n out, _ = self.forward(x)\n out = torch.softmax(out, dim=1) # assuming logits has the shape [batch_size, nb_classes]\n out = out.cpu().detach().numpy().copy()\n out = np.squeeze(out)\n return out\n\n\nclass HuggingfaceFrontend(nn.Module):\n def __init__(self, url, use_last=False, 
encoder_size=12):\n super().__init__()\n self.model = AutoModel.from_pretrained(url, trust_remote_code=True)\n self.use_last = use_last\n if encoder_size == 12:\n self.layer_weights = torch.nn.parameter.Parameter(data=torch.ones(13), requires_grad=True)\n elif encoder_size == 24:\n self.layer_weights = torch.nn.parameter.Parameter(data=torch.ones(25), requires_grad=True)\n\n def forward(self,x):\n x = self.model(x, output_hidden_states=True, return_dict=None, output_attentions=None)\n if self.use_last:\n h = x[\"last_hidden_state\"]\n pad_width = (0, 0, 0, 1)\n h = F.pad(h, pad_width, mode='reflect')\n else:\n h = x[\"hidden_states\"]\n h = torch.stack(h, dim=3)\n pad_width = (0, 0, 0, 0, 0, 1)\n h = F.pad(h, pad_width, mode='reflect')\n if not self.use_last:\n weights = torch.softmax(self.layer_weights,dim=0)\n # x = x.transpose(1,3) # (B, Emb, Time, Ch) * (Ch, 1)\n h = torch.matmul(h, weights)\n return h\n\n def fix_parameter(self,freeze_all=False):\n if freeze_all:\n for param in self.model.parameters():\n param.requires_grad = False\n else:\n self.model.feature_extractor._freeze_parameters()\n\n def unfreeze_parameter(self):\n for param in self.model.parameters():\n param.requires_grad = True\n self.model.feature_extractor._freeze_parameters()\n\n def get_layer_weights(self):\n lw = torch.softmax(self.layer_weights,dim=0)\n lw = lw.detach().cpu().numpy().copy()\n return lw\n\nclass Backend(nn.Module):\n def __init__(self, class_size, encoder_size=12, frame=False) -> None:\n super().__init__()\n assert encoder_size == 12 or encoder_size == 24\n if encoder_size == 12:\n self.feature_dim = 768\n elif encoder_size == 24:\n self.feature_dim = 1024\n else:\n raise NotImplementedError\n self.proj = nn.Linear(self.feature_dim, self.feature_dim)\n self.dropout = nn.Dropout(0.5)\n self.classifier = nn.Linear(self.feature_dim, class_size)\n self.frame = frame\n\n def forward(self, x):\n input_size = self.feature_dim\n # if len(x.shape) == 4 and self.combine_dims:\n # input_size = input_shape[2] * input_shape[3]\n x = self.proj(x)\n if not self.frame:\n x = x.mean(1, False)\n feature = x\n x = self.dropout(x)\n x = self.classifier(x)\n return x, feature\n\nclass SSLNet(BaseModel):\n def __init__(self,\n conf,\n weights:dict or list=None,\n class_num=10,\n weight_sum=False\n ):\n super().__init__()\n\n self.num_classes = class_num\n self.lr = conf.lr\n self.url = conf.url\n self.freeze_all = conf.freeze_all\n encode_size = 24 if \"large\" in self.url else 12\n # if param.sr != 16000:\n # self.resampler = torchaudio.transforms.Resample(orig_freq=param.sr, new_freq=16000)\n # else:\n # self.resampler = nn.Identity()\n \n # self.frontend = AutoModel.from_pretrained(self.url, trust_remote_code=True,cache_dir='./hfmodels')\n \n # for p in self.frontend.parameters():\n # p.requires_grad = False\n self.frontend = HuggingfaceFrontend(url=self.url,use_last=(1-weight_sum),encoder_size=encode_size)\n self.backend = Backend(class_num, encoder_size=encode_size)\n\n self.train_acc = Accuracy(num_classes=self.num_classes, average='macro', task='multiclass')\n self.val_acc = Accuracy(num_classes=self.num_classes, average='macro', task='multiclass')\n self.test_acc = Accuracy(num_classes=self.num_classes, average='macro', task='multiclass')\n self.test_top2 = Accuracy(num_classes=self.num_classes, average='macro', top_k=2, task='multiclass')\n self.test_top3 = Accuracy(num_classes=self.num_classes, average='macro', top_k=3, task='multiclass')\n self.test_f1 = F1Score(num_classes=self.num_classes, average='macro', 
task='multiclass')\n self.confusion = ConfusionMatrix(num_classes=self.num_classes, task='multiclass')\n # class_weights = [float(x) for x in weights.values()]\n # self.class_weights = torch.from_numpy(np.array(class_weights)).float()\n self.conf = conf\n\n def forward(self, x):\n # print(x.shape)\n x = x.squeeze(dim=1)\n # print(x.shape, type(x))\n # x = x.to(DEVICE) # FIXME: Unknown behaviour on return to cpu by feature extractor\n x = self.frontend(x)\n # h = x[\"hidden_states\"]\n # h = torch.stack(h, dim=3)\n # pad_width = (0, 0, 0, 0, 0, 1)\n # h = F.pad(h, pad_width, mode='reflect')\n # print(h.shape)\n out, feature = self.backend(x)\n return out, feature\n\n def configure_optimizers(self, lr=1e-3):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n return optimizer\n \n # def get_layer_weight(self):\n # lw = torch.softmax(self.backend.layer_weights,dim=0)\n # lw.detach().cpu().numpy().copy()\n # return lw\n \n def training_step(self, train_batch, batch_idx):\n x, y = train_batch\n # print(x.shape)\n out, _ = self(x)\n loss = F.cross_entropy(out, y)\n self.log('train_loss', loss, on_epoch=True, on_step=False)\n self.log('train_acc', self.train_acc(out, y), on_step=False, on_epoch=True)\n return loss\n\n def validation_step(self, val_batch, batch_idx):\n x, y = val_batch\n out,_ = self(x)\n loss = F.cross_entropy(out, y)\n self.log('val_loss', loss, on_epoch=True, on_step=False)\n self.log('val_acc', self.val_acc(out, y), on_step=False, on_epoch=True)\n return loss\n\n def test_step(self, test_batch, batch_idx):\n x, y = test_batch\n out,_= self(x)\n self.log('test_accuracy', self.test_acc(out,y), on_epoch=True, on_step=False)\n self.log('test_f1', self.test_f1(out, y), on_epoch=True, on_step=False)\n self.log('test_top2_accuracy', self.test_top2(out, y), on_epoch=True, on_step=False)\n self.log('test_top3_accuracy', self.test_top3(out, y), on_epoch=True, on_step=False)\n self.log('test_confusion', self.confusion(out, y), on_epoch=False, on_step=False)\n\n def on_training_epoch_start(self):\n if (self.current_epoch > self.conf.lin_epoch) and self.freeze_all:\n for p in self.frontend.parameters():\n p.requires_grad = True\n self.frontend.feature_extractor._freeze_parameters()\n \n # def on_test_epoch_start(self) -> None:\n # lw = self.frontend.get_layer_weights()\n # for num,i in enumerate(lw):\n # self.log('layer_weight_{}'.format(num), i, on_epoch=False, on_step=False)\n\n\n# class SSLNet_RAW(nn.Module):\n# def __init__(self,\n# conf,\n# weights:dict or list=None,\n# url=\"microsoft/wavlm-base-plus\",\n# class_num=10,\n# freeze_all=False\n# ):\n \n# super().__init__()\n# self.conf = conf\n# self.num_classes = class_num\n# self.lr = conf.lr\n# encode_size = 24 if \"large\" in url else 12\n# # if param.sr != 16000:\n# # self.resampler = torchaudio.transforms.Resample(orig_freq=param.sr, new_freq=16000)\n# # else:\n# # self.resampler = nn.Identity()\n# self.frontend = AutoModel.from_pretrained(url, trust_remote_code=True, cache_dir='./hfmodels')\n \n\n# for p in self.frontend.parameters():\n# p.requires_grad = False\n \n# self.backend = Backend(class_num, encoder_size=encode_size)\n\n# self.train_acc = Accuracy(num_classes=self.num_classes, average='macro', task='multiclass')\n# self.val_acc = Accuracy(num_classes=self.num_classes, average='macro', task='multiclass')\n# self.test_acc = Accuracy(num_classes=self.num_classes, average='macro', task='multiclass')\n# self.test_top2 = Accuracy(num_classes=self.num_classes, average='macro', top_k=2, task='multiclass')\n# 
self.test_top3 = Accuracy(num_classes=self.num_classes, average='macro', top_k=3, task='multiclass')\n# self.test_f1 = F1Score(num_classes=self.num_classes, average='macro', task='multiclass')\n# self.confusion = ConfusionMatrix(num_classes=self.num_classes, task='multiclass')\n# # class_weights = [float(x) for x in weights.values()]\n# # self.class_weights = torch.from_numpy(np.array(class_weights)).float()\n\n# def forward(self, x):\n# # print(x.shape)\n# x = x.squeeze(dim=1)\n# # print(x.shape, type(x))\n# # x = x.to(DEVICE) # FIXME: Unknown behaviour on return to cpu by feature extractor\n# x = self.frontend(x, output_hidden_states=True, return_dict=None, output_attentions=None)\n# h = x[\"hidden_states\"]\n# h = torch.stack(h, dim=3)\n# # pad_width = (0, 0, 0, 0, 0, 1)\n# # h = F.pad(h, pad_width, mode='reflect')\n# # print(h.shape)\n# out, feature = self.backend(h)\n# return out, feature\n\n# def configure_optimizers(self, lr=1e-3):\n# optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n# return optimizer\n \n# def get_layer_weight(self):\n# lw = torch.softmax(self.backend.layer_weights, dim=0)\n# lw = lw.detach().cpu().numpy()\n# return lw\n\n# def on_training_epoch_start(self):\n \n# if (self.current_epoch > self.conf.lin_epoch):\n# print(\"finetune epoch\")\n# for p in self.frontend.parameters():\n# self.lr=5e-5\n# p.requires_grad = True\n# self.frontend.feature_extractor._freeze_parameters()\n# else:\n# print(\"probe ep\")\n\n","repo_name":"yamathcy/ssl_singerid","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28145208476","text":"from typing import Optional\nfrom datetime import datetime\n\n\nclass StatsRecord:\n \"\"\"\n An object used to contain the stats\n info for all users at a given time.\n \"\"\"\n\n def __init__(self, time: datetime, stats_dict: dict, canvas_code: str):\n self.time = time\n self.canvas_code = canvas_code\n self._stats = stats_dict\n\n def get(self, username: str) -> Optional[dict]:\n \"\"\"\n Get the stats for a user.\n \"\"\"\n return self._stats.get(username)\n\n @classmethod\n def from_json(cls, json: dict, canvas_code: str):\n \"\"\"\n Generate a StatsRecord object from the data\n at pxls.space/stats/stats.json.\n \"\"\"\n date_str = json[\"generatedAt\"].split(\" (\")[0]\n time = datetime.strptime(date_str, \"%Y/%m/%d - %H:%M:%S\")\n stats_dict = {}\n for stats in json[\"toplist\"][\"canvas\"]:\n stats_dict[stats[\"username\"]] = {\n \"pixels\": stats[\"pixels\"],\n \"place\": stats[\"place\"],\n }\n return cls(time, stats_dict, canvas_code)\n","repo_name":"Seon82/pyCharity","sub_path":"src/handlers/pxls/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"15591842141","text":"import pyrosetta as _pyrosetta\n\n\ndef stub_from_residue(\n residue, center_atom=\"CA\", atom1=\"N\", atom2=\"CA\", atom3=\"C\"\n):\n \"\"\"\n Returns a stub. 
A thin wrapper around residue.atom(name).xyz() that defaults to the backbone atoms.\n \"\"\"\n return _pyrosetta.rosetta.core.kinematics.Stub(\n residue.atom(center_atom).xyz(),\n residue.atom(atom1).xyz(),\n residue.atom(atom2).xyz(),\n residue.atom(atom3).xyz(),\n )\n\n\ndef rt_from_res_atom_sets(\n start_res,\n end_res,\n start_center_atom=\"CA\",\n start_atom1=\"N\",\n start_atom2=\"CA\",\n start_atom3=\"C\",\n end_center_atom=\"CA\",\n end_atom1=\"N\",\n end_atom2=\"CA\",\n end_atom3=\"C\",\n):\n \"\"\"\n Returns the RT from the start to end res: centers the stubs on the given atoms\n\n Deprecated: identical to rt_from_res_atoms except for keyword order; prefer that function.\n\n Defaults to CA as the center atom name, N-CA-C as the plane atoms\n \"\"\"\n\n stub1 = stub_from_residue(\n start_res, start_center_atom, start_atom1, start_atom2, start_atom3\n )\n stub2 = stub_from_residue(\n end_res, end_center_atom, end_atom1, end_atom2, end_atom3\n )\n rt = _pyrosetta.rosetta.core.kinematics.RT(stub1, stub2)\n return rt\n\n\ndef rt_from_res_atoms(\n start_res,\n end_res,\n start_center_atom=\"CA\",\n end_center_atom=\"CA\",\n start_atom1=\"N\",\n start_atom2=\"CA\",\n start_atom3=\"C\",\n end_atom1=\"N\",\n end_atom2=\"CA\",\n end_atom3=\"C\",\n):\n \"\"\"\n Returns the RT from the start to end res: centers the stubs on the given atoms\n\n Defaults to CA as the center atom name, N-CA-C as the plane atoms\n \"\"\"\n\n stub1 = stub_from_residue(\n start_res, start_center_atom, start_atom1, start_atom2, start_atom3\n )\n stub2 = stub_from_residue(\n end_res, end_center_atom, end_atom1, end_atom2, end_atom3\n )\n rt = _pyrosetta.rosetta.core.kinematics.RT(stub1, stub2)\n return rt\n\n\ndef peptide_bond_rt(start_res, end_res):\n \"\"\"\n Returns the RT between carbonyl-centered stubs (O, CA, C) of start_res and end_res\n \"\"\"\n rt = rt_from_res_atoms(\n start_res,\n end_res,\n start_center_atom=\"O\",\n start_atom1=\"O\",\n end_center_atom=\"O\",\n end_atom1=\"O\",\n )\n return rt\n\n\ndef residue_ca_rt_dist_sq(source_res1, dest_res1, source_res2, dest_res2):\n \"\"\"\n Returns the distance_squared of the RT of two residue pairs\n\n Uses the CA to CA RT of each pair\n \"\"\"\n return rt_from_res_atoms(source_res1, dest_res1).distance_squared(\n rt_from_res_atoms(source_res2, dest_res2)\n )\n\n\ndef residue_pep_rt_dist_sq(source_res1, dest_res1, source_res2, dest_res2):\n \"\"\"\n Returns the distance_squared of the RT of two residue pairs\n\n Uses the carbonyl-centered (O, CA, C) RT of each pair\n \"\"\"\n return peptide_bond_rt(source_res1, dest_res1).distance_squared(\n peptide_bond_rt(source_res2, dest_res2)\n )\n","repo_name":"dmitropher/dzutils","sub_path":"dzutils/pyrosetta_utils/geometry/rt_utils.py","file_name":"rt_utils.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"27085464143","text":"#01-04-2021 This program lets visitors buy tickets, get a receipt and\n#receive their tickets, and it records the relevant data in a database\n#so the park gates can validate the tickets.\n#25-05-2021 Restarted the project after learning more wxPython.\n\n#Importing Requirements\nimport wx #wxPython, GUI\nfrom wx.adv import DatePickerCtrl\nimport json\n\nclass Moria(wx.Frame):\n \"\"\"\n Main window\n \"\"\"\n def __init__(self, *args, **kw):\n super(Moria,self).__init__(*args, **kw)\n self.initFrame()\n \n def initFrame(self):\n #Create title in top bar.\n self.SetTitle(\"Moria\")\n self.Center() \n #Make the panel larger than any screen to allow free positioning. 
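A hedged usage sketch for the RT helpers above. It assumes a working PyRosetta install; pyrosetta.init() and pose_from_sequence are standard PyRosetta calls, and the ten-residue sequence is arbitrary:

import pyrosetta

pyrosetta.init()
pose = pyrosetta.pose_from_sequence("ACDEFGHIKL")
res_a, res_b = pose.residue(2), pose.residue(5)

# CA-centered RT between the two residues, plus its (zero) distance to itself.
rt = rt_from_res_atoms(res_a, res_b)
print(rt)
print(residue_ca_rt_dist_sq(res_a, res_b, res_a, res_b))  # ~0.0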
\n self.panel = wx.Panel(self, size=(3000, 3000))\n #Paint background every time the window changes. \n self.panel.Bind(wx.EVT_PAINT, self.OnPaint)\n #Load titlepage on startup.\n self.page0()\n #What page is displayed?\n self.page = 0\n \n def OnPaint(self, event):\n #The background image depending on screen.\n if self.page == 0:\n backgroundimage = \"Images/MoriaGateBlank.jpg\"\n elif self.page == 1 or self.page == 2 or self.page == 4:\n backgroundimage = \"Images/MoriaGateBoxed.jpg\"\n \n if self.page != 3:\n image = wx.Bitmap(backgroundimage)\n image = wx.Bitmap.ConvertToImage(image)\n #Call screensizer function \n screensizing = self.screensizer()\n #Unpack result screensizer function\n width, height = screensizing\n #Set the program size to a fraction of the screen size in the right proportions\n self.SetSize(screensizing)\n #Make image the right size as background and draw it.\n image=image.Scale(width, height, wx.IMAGE_QUALITY_HIGH)\n image = image.ConvertToBitmap()\n dc=wx.PaintDC(self.panel)\n dc.DrawBitmap(image, 0, 0)\n \n def page0(self):\n #Set page and refresh\n self.page = 0\n self.Refresh()\n #Call screensizer function \n screensizing = self.screensizer()\n #Unpack result screensizer function\n width, height = screensizing\n #Calculate position for button.\n wpos = width\n hpos = height\n wpos = int(wpos/2-40)\n hpos = int(hpos/10*7)\n #Make button\n orderbutton = wx.Button(self.panel, id=wx.ID_ANY, label=\"Order Tickets\",\n pos=(wpos,hpos))\n orderbutton.Bind(wx.EVT_BUTTON, self.page1)\n #Make background black and letters white.\n orderbutton.SetBackgroundColour(\"black\")\n orderbutton.SetForegroundColour(\"white\")\n #Calculate position for Banner.\n wpos = width\n hpos = height\n wpos = int(wpos/2-90)\n hpos = int(hpos/10*1)\n #Make Banner \n title = wx.StaticText(self.panel, id=wx.ID_ANY, label=\"Welcome to Moria\",\n pos=(wpos,hpos))\n #Make font larger.\n font = wx.Font(18, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n title.SetBackgroundColour(\"black\")\n title.SetForegroundColour(\"white\")\n #Set Font to banner\n title.SetFont(font)\n \n def page1(self, event):\n #Write data depending on source page.\n if self.page == 0:\n self.num17tick = 0\n self.num64tick = 0\n self.num65tick = 0\n #Set default price to 0\n self.price = 0\n #self.date = wx.DateTime.Now()\n returning = False\n elif self.page == 2: \n returning = True \n \n #Set page and refresh\n self.page = 1\n self.Refresh()\n #Call screensizer function \n screensizing = self.screensizer()\n #Unpack result screensizer function\n width, height = screensizing\n #Empty last page\n for child in self.panel.GetChildren():\n child.Destroy()\n \n #Position banner\n wpos = width\n hpos = height\n wpos = int(wpos/2-90)\n hpos = int(hpos/10*1)\n #Make Banner\n title = wx.StaticText(self.panel, id=wx.ID_ANY, label=\"Order your tickets\",\n pos =(wpos,hpos))\n #Make font larger.\n font = wx.Font(18, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n title.SetBackgroundColour(\"black\")\n title.SetForegroundColour(\"white\")\n #Set Font to banner\n title.SetFont(font)\n \n #Position text\n wpos = width\n hpos = height\n wpos = int(wpos/2-70)\n hpos = int(hpos/10*4)\n #Make Banner\n text = wx.StaticText(self.panel, id=wx.ID_ANY, label=\"How many tickets?\",\n pos =(wpos,hpos))\n #Make font larger.\n font = wx.Font(14, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n text.SetBackgroundColour(\"#030303\")\n 
text.SetForegroundColour(\"white\")\n #Set Font to banner\n text.SetFont(font)\n \n #Create panel for ticket ordering. Make sure to set parent correctly if UI immutable.\n wpos = width\n hpos = height\n wpos = int(wpos/4)\n hpos = int(hpos/10*4.5)\n pwith = int(width/16*9)\n pheight = int(height/10*4)\n \n orderpanel = wx.Panel(parent = self.panel , size=(pwith,pheight),\n pos = (wpos, hpos), style=wx.TRANSPARENT_WINDOW)\n\n #Create sizer\n gridbox = wx.GridBagSizer(8,5)\n \n #Make age3\n self.text = wx.StaticText(orderpanel, label=\"Age 0-3 are free:\")\n #Make font larger.\n font = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n self.text.SetBackgroundColour(\"#030303\")\n self.text.SetForegroundColour(\"white\")\n #Set Font to age3\n self.text.SetFont(font)\n\n #Make age17\n self.text1 = wx.StaticText(orderpanel, label=\"Age 4-17:\")\n #Make font larger.\n font = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n self.text1.SetBackgroundColour(\"#030303\")\n self.text1.SetForegroundColour(\"white\")\n #Set Font to age17\n self.text1.SetFont(font)\n \n #Make age64\n self.text2 = wx.StaticText(orderpanel, label=\"Age 18-64:\")\n #Make font larger.\n font = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n self.text2.SetBackgroundColour(\"#030303\")\n self.text2.SetForegroundColour(\"white\")\n #Set Font to age64\n self.text2.SetFont(font)\n\n #Make age65+\n self.text3 = wx.StaticText(orderpanel, label=\"Age 65+:\")\n #Make font larger.\n font = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n self.text3.SetBackgroundColour(\"#030303\")\n self.text3.SetForegroundColour(\"white\")\n #Set Font to age65+\n self.text3.SetFont(font)\n \n #counter17\n self.counter17 = wx.SpinCtrl(orderpanel, initial=0)\n self.counter17.SetRange(0,100) \n #counter64\n self.counter64 = wx.SpinCtrl(orderpanel, initial=0)\n self.counter64.SetRange(0,100)\n #counter65+\n self.counter65 = wx.SpinCtrl(orderpanel, initial=0)\n self.counter65.SetRange(0,100)\n \n #Make discount\n self.text4 = wx.StaticText(orderpanel, label=\"If you buy 5 tickets or \"\n \"more you get a €5,- discount.\", style=wx.TE_MULTILINE,\n size=(-1,-1))\n self.text4.Wrap(int(width/16*5))\n #Make font larger.\n font = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n self.text4.SetBackgroundColour(\"#030303\")\n self.text4.SetForegroundColour(\"white\")\n #Set Font to age65+\n self.text4.SetFont(font)\n \n #Detect change in amount tickets and calculate price.\n self.counter17.Bind(wx.EVT_SPINCTRL, self.calcprice)\n self.counter64.Bind(wx.EVT_SPINCTRL, self.calcprice)\n self.counter65.Bind(wx.EVT_SPINCTRL, self.calcprice)\n \n #Make price\n self.text5 = wx.StaticText(orderpanel, label=\"The total price will be €\"\n + str(self.price) + \",-\")\n #Make font larger.\n font = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n self.text5.SetBackgroundColour(\"#030303\")\n self.text5.SetForegroundColour(\"white\")\n #Set Font to price+\n self.text5.SetFont(font)\n \n #Make DateLabel\n self.text6 = wx.StaticText(orderpanel, label=\"When will you \"\n \"descend into the depths?\")\n #Make font larger.\n font = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n self.text6.SetBackgroundColour(\"#030303\")\n 
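page1 above positions its labels, spinners, and button with a wx.GridBagSizer. A self-contained sketch of that layout pattern (the labels and ranges are illustrative, not the app's exact values):

import wx

app = wx.App()
frame = wx.Frame(None, title="GridBag demo")
panel = wx.Panel(frame)

sizer = wx.GridBagSizer(vgap=8, hgap=5)
sizer.Add(wx.StaticText(panel, label="Age 4-17:"), pos=(0, 0))
sizer.Add(wx.SpinCtrl(panel, initial=0, min=0, max=100), pos=(0, 1))
# span=(rows, cols) lets one widget cover several cells, as page1 does for its labels.
sizer.Add(wx.Button(panel, label="Order"), pos=(1, 0), span=(1, 2), flag=wx.ALIGN_CENTER)

panel.SetSizer(sizer)
frame.Show()
app.MainLoop()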
self.text6.SetForegroundColour(\"white\")\n #Set font on the date label.\n self.text6.SetFont(font)\n \n #Create the date widget.\n self.calendar = DatePickerCtrl(orderpanel, style=wx.adv.DP_DROPDOWN)\n self.calendar.SetRange(wx.DateTime.Now(),(wx.DateTime.FromDMY(5,5,3000)))\n #Create a control date to check later whether the date has changed.\n self.controldate = wx.DateTime.Now()\n self.controldate = self.controldate.GetDateOnly()\n\n #Create ticket button\n self.ticketbutton = wx.Button(orderpanel, id=wx.ID_ANY, label=\"Order Tickets\")\n #Make background black and letters white.\n self.ticketbutton.SetBackgroundColour(\"black\")\n self.ticketbutton.SetForegroundColour(\"white\") \n #Add button function\n self.ticketbutton.Bind(wx.EVT_BUTTON, self.page2)\n \n #Add age3 to sizer\n gridbox.Add(self.text, pos=(0,0), span=(1,2))\n #Add age17 to sizer\n gridbox.Add(self.text1, pos=(1,0))\n #Add age64 to sizer\n gridbox.Add(self.text2, pos=(2,0)) \n #Add age65+ to sizer\n gridbox.Add(self.text3, pos=(3,0))\n #Add counter17 to sizer\n gridbox.Add(self.counter17, pos=(1,1), flag=wx.ALIGN_CENTER)\n #Add counter64 to sizer\n gridbox.Add(self.counter64, pos=(2,1), flag=wx.ALIGN_CENTER)\n #Add counter65 to sizer\n gridbox.Add(self.counter65, pos=(3,1), flag=wx.ALIGN_CENTER)\n #Add discount to sizer\n gridbox.Add(self.text4, pos=(1,2),span=(3,3))\n #Add price to sizer\n gridbox.Add(self.text5, pos=(4,1),span=(1,5))\n #Add DateLabel to sizer\n gridbox.Add(self.text6, pos=(6,0),span=(1,5), flag=wx.ALIGN_CENTER,\n border = 20)\n #Add calendar to sizer\n gridbox.Add(self.calendar, pos=(7,1),span=(1,5))\n #Add ticketbutton to sizer\n gridbox.Add(self.ticketbutton, pos=(8,0), span=(1,5), flag=wx.ALIGN_CENTER)\n \n if returning:\n #Restore the previously chosen ticket counts.\n self.counter17.SetValue(self.num17tick)\n self.counter64.SetValue(self.num64tick)\n self.counter65.SetValue(self.num65tick)\n #Restore the previously chosen date.\n if self.date != self.controldate:\n self.calendar.SetValue(self.date)\n \n orderpanel.SetSizer(gridbox)\n orderpanel.Layout()\n\n def page2(self, event):\n #Save the selected date.\n self.date = self.calendar.GetValue()\n #Set page and refresh\n self.page = 2\n self.Refresh()\n #Call screensizer function \n screensizing = self.screensizer()\n #Unpack result screensizer function\n width, height = screensizing\n #Empty last page\n for child in self.panel.GetChildren():\n child.Destroy()\n #Position banner\n wpos = width\n hpos = height\n wpos = int(wpos/2-90)\n hpos = int(hpos/10*1)\n #Make Banner\n title = wx.StaticText(self.panel, id=wx.ID_ANY, label=\"Please confirm \"\n \"your order.\", pos =(wpos,hpos))\n #Make font larger.\n font = wx.Font(18, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n title.SetBackgroundColour(\"black\")\n title.SetForegroundColour(\"white\")\n #Set Font to banner\n title.SetFont(font) \n \n #Create panel for bounding confirmation information.\n wpos = width\n hpos = height\n wpos = int(wpos/40*9)\n hpos = int(hpos/20*7)\n pwith = int(width/16*9)\n pheight = int(height/10*4)\n confirmpanel = wx.Panel(parent = self.panel , size=(pwith,pheight),\n pos = (wpos, hpos), style=wx.TRANSPARENT_WINDOW) \n \n #Make the date more readable.\n self.displaydate = str(self.date)\n self.displaydate = self.displaydate.split()\n #Confirmation text\n confirmationtext = (\"Thank you for braving the depths of Moria. Please \"\n \"confirm that all information has been entered correctly. \\n\\n\"\n \"You're welcome on: \" + str(self.displaydate[0]) + \"\\n\" +\n str(self.num17tick) + \" tickets for people under 18.\\n\" +\n str(self.num64tick) + \" tickets for people under 65.\\n\" +\n str(self.num65tick) + \" tickets for people at or over 65.\\n\" +\n \"The total cost will come to €\" + str(self.price) + \",-\\n\\n\"\n \"If this is all correct, we hope to see you soon. \"\n \"If not, please return to the previous page.\")\n #Make confirmation information\n self.text7 = wx.StaticText(confirmpanel, label=confirmationtext,\n style=wx.TE_MULTILINE, size=(-1,-1))\n self.text7.Wrap(int(width/16*8))\n #Make font larger.\n font = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)\n #Make background black and letters white.\n self.text7.SetBackgroundColour(\"#030303\")\n self.text7.SetForegroundColour(\"white\")\n #Set font on the confirmation text.\n self.text7.SetFont(font)\n \n wpos = int(10)\n hpos = int(height/20*7)\n #Create return button\n self.returnbutton = wx.Button(confirmpanel, id=wx.ID_ANY, \n label=\"Change Tickets\", pos=(wpos,hpos))\n #Make background black and letters white.\n self.returnbutton.SetBackgroundColour(\"black\")\n self.returnbutton.SetForegroundColour(\"white\") \n #Add button function\n self.returnbutton.Bind(wx.EVT_BUTTON, self.page1)\n \n bwidth = self.returnbutton.Size.GetWidth()\n wpos = int(bwidth + 20)\n hpos = int(height/20*7)\n #Create pay button\n self.paybutton = wx.Button(confirmpanel, id=wx.ID_ANY, \n label=\"Go to Payments\", pos=(wpos,hpos))\n #Make background black and letters white.\n self.paybutton.SetBackgroundColour(\"black\")\n self.paybutton.SetForegroundColour(\"white\") \n #Add button function\n self.paybutton.Bind(wx.EVT_BUTTON, self.page3)\n \n confirmpanel.Layout()\n \n def page3(self, event):\n #Set page and refresh\n self.page = 3\n self.Refresh()\n #Call screensizer function \n screensizing = self.screensizer()\n #Unpack result screensizer function\n width, height = screensizing\n #The order itself is assembled in page4, once payment has gone through.\n \n #Empty last page\n for child in self.panel.GetChildren():\n child.Destroy()\n \n #Create fakepaying button. wx expects integer coordinates, hence the int casts.\n self.fakepaybutton = wx.Button(self.panel, id=wx.ID_ANY, \n label=\"'pay' Tickets\", pos=(int(width/2), int(height/2)))\n #Make background black and letters white.\n self.fakepaybutton.SetBackgroundColour(\"white\")\n self.fakepaybutton.SetForegroundColour(\"black\") \n #Add button function\n self.fakepaybutton.Bind(wx.EVT_BUTTON, self.page4)\n \n def page4(self, event):\n #Create dictionary for order.\n order = {\n \"dateordered\" : self.controldate,\n \"price\" : self.price,\n }\n #Add up number of tickets\n self.numticks = self.num17tick+self.num64tick+self.num65tick\n self.tick = 0\n while self.tick != self.numticks:\n if self.tick < self.num17tick:\n age = 17\n elif self.tick < (self.num17tick+self.num64tick):\n age = 64\n else:\n age = 65\n\n order[self.tick] = {\n \"datevisit\" : self.displaydate,\n \"age\" : age,\n \"qr\" : \"\"\n }\n self.tick += 1\n #Set page and refresh\n self.page = 4\n self.Refresh()\n #Call screensizer function \n screensizing = self.screensizer()\n #Unpack result screensizer function\n width, height = screensizing\n #Empty last page\n for child in self.panel.GetChildren():\n child.Destroy() \n \n def screensizer(self):\n #Get size of the screen.\n screensize = wx.DisplaySize()\n #If the screen is narrower than 9/16 (0.5625), size from the width; otherwise from the height.\n if (screensize[0]/screensize[1])<= 0.5625:\n screensizing=(int((screensize[0]-100)*0.9), \n int((screensize[0]-100)/0.5625*0.9))\n else:\n 
screensizing=(int((screensize[1]/16*9)*0.9), \n int(screensize[1]*0.9))\n #Return the width and height for other functions.\n return screensizing\n \n def calcprice(self,event):\n #Calculate price\n #Retrieve amount of tickets\n self.num17tick = self.counter17.GetValue()\n self.num64tick = self.counter64.GetValue()\n self.num65tick = self.counter65.GetValue()\n #Calculate price\n self.price = self.num17tick*5+self.num64tick*10+self.num65tick*8\n if (self.num17tick+self.num64tick+self.num65tick) >= 5:\n self.price -= 5\n #Make price\n self.text5.SetLabel(\"The total price will be €\"\n + str(self.price) + \",-\")\n\ndef main():\n app=wx.App()\n frm = Moria(None, title=\"MoriaTickets\")\n frm.Show()\n app.MainLoop()\n\nif __name__==\"__main__\":\n main()\n","repo_name":"cheetawilliam/Moria","sub_path":"TicketMachine.py","file_name":"TicketMachine.py","file_ext":"py","file_size_in_byte":18363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27447793695","text":"import requests\nimport json\nimport datetime as dt\nimport CSSMJSONParser as cssm_parser\nimport pandas as pd\nimport io\nfrom WBXTeamsBEIntegration import WBXTeamsBEIntegration\nfrom requests_toolbelt import MultipartEncoder\nfrom loguru import logger\nimport functools\nimport os\nfrom cachetools import cached, \\\n TTLCache # 1 - let's import the \"cached\" decorator and the \"TTLCache\" object from cachetools\nimport threading\n\n__author__ = \"Tim Taylor \"\n__contributors__ = []\n__copyright__ = \"Copyright (c) 2019 Cisco and/or its affiliates.\"\n__license__ = \"Cisco Sample Code License, Version 1.0\"\n\n\n\nbe_service_name = 'SLD-WebexTeams-Bot-Service'\n\n# Relevant URL's\nurl_base = \"https://www.easysmartaccounts.com:10000/api/userinfo/access-token\"\nbe_login_url = os.environ.get('SMART_LICENSING_LOGIN_URL')\ncssm_url = os.environ.get('CSSM_DOMAIN')\n\n# We cache our data for a few minutes so that subsequent requests from the client do not take too long.\nlicense_cache = TTLCache(maxsize=100, ttl=300)\n\n# for debugging purposes.\nARE_DEBUGGING=False\nfile_name = \"./license_by_sa-va_full.json\"\n\n# function that helps with decorator for logging in/out of functions/methods\ndef logger_wraps(*, entry=True, exit=True, level=\"DEBUG\"):\n def wrapper(func):\n name = func.__name__\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n logger_ = logger.opt(depth=1)\n if entry:\n logger_.log(level, \"Entering '{}'\", name)\n result = func(*args, **kwargs)\n if exit:\n logger_.log(level, \"Exiting '{}'\", name)\n return result\n\n return wrapped\n\n return wrapper\n\n\n# This class is where the magic happens so far as being able to pull and parse the json from CSSM.\nclass SmartAccountSDK:\n @logger_wraps()\n def __init__(self, host, token):\n\n self.host = host\n self.token = token\n self.__all_licenses = None\n\n @logger_wraps()\n def list_accounts(self):\n \"\"\" returns all accounts associated with a user\n TODO: This could be a lot more defensive in terms of errors.\n \"\"\"\n\n logger.info(\"preparing request\")\n call = \"/services/api/smart-accounts-and-licensing/v2/accounts/\"\n uri = (\"https://\" + self.host + call)\n headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + self.token}\n logger.info(\"done preparing request\")\n response = requests.get(uri, headers=headers, verify=True)\n logger.info(\"received response\")\n\n request_successful = False\n response_json = None\n if response.status_code == 200:\n 
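The logger_wraps decorator above logs function entry and exit through loguru. A small usage sketch (the decorated function is made up):

from loguru import logger

@logger_wraps(level="INFO")
def add(a, b):
    return a + b

add(1, 2)
# loguru output (message text comes from the decorator above):
#   Entering 'add'
#   Exiting 'add'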
logger.info(\"request was successful. status code 200\")\n request_successful = True\n response_json = response.json()\n\n else:\n logger.info(\" response not successful, response status code: {}\".format(response.status_code))\n try:\n logger.info(\" response body: \\n{}\".format(response.json()))\n except:\n logger.info(\" no response body\")\n #logger.info('list_accounts json: \\n{}'.format(json.dumps(response_json, indent=4)))\n return request_successful, response.status_code, response_json\n\n @logger_wraps()\n def list_licenses(self, sa_domain, virtual_accounts):\n call = \"/services/api/smart-accounts-and-licensing/v1/accounts/\" + sa_domain + \"/licenses\"\n uri = (\"https://\" + self.host + call)\n\n headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + self.token}\n body = {\"limit\": 100, \"offset\": 0, 'virtualAccounts': virtual_accounts}\n\n #logger.info('list_licenses json body: \\n{}'.format(body))\n\n response = requests.post(uri, headers=headers, data=json.dumps(body), verify=True)\n\n status_code = response.status_code\n request_successful = False\n response_json = None\n if status_code == 200:\n logger.info(\"request was successful. status code 200\")\n response_json = response.json()\n request_successful = True\n else:\n logger.info(\" response not successful, response status code: {}\".format(response.status_code))\n try:\n logger.info(\" response body: \\n{}\".format(response.json()))\n except:\n logger.info(\" no response body\")\n\n #logger.info('list_licenses json: \\n{}'.format(json.dumps(response_json, indent=4)))\n return request_successful, status_code, response_json\n\n @logger_wraps()\n def retrieve_and_process_licenses(self, sa, virtual_account_list):\n account_domain = sa.get(\"accountDomain\")\n # logger.info('account domain: {}'.format(account_domain))\n # logger.info('virtual_account_list: \\n{}'.format(virtual_account_list))\n\n request_successful, status_code, json_array = self.list_licenses(account_domain, [])\n\n if request_successful == False:\n logger.info('Failure with list_licenses. status code: {}'.format(status_code))\n\n else:\n logger.info('Success with list_licenses. 
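list_licenses above sends limit/offset in its body, but the callers only ever fetch the first page of 100 records. A hedged sketch of offset pagination against the same endpoint, assuming the response keeps returning a 'licenses' list until the account is exhausted:

import json
import requests

def fetch_all_licenses(host, token, domain, page_size=100):
    """Page through the CSSM licenses endpoint using limit/offset (sketch; the
    response shape is assumed to match list_licenses above)."""
    uri = "https://" + host + "/services/api/smart-accounts-and-licensing/v1/accounts/" + domain + "/licenses"
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + token}
    licenses, offset = [], 0
    while True:
        body = {"limit": page_size, "offset": offset, "virtualAccounts": []}
        resp = requests.post(uri, headers=headers, data=json.dumps(body), verify=True)
        resp.raise_for_status()
        page = resp.json().get("licenses", [])
        licenses.extend(page)
        if len(page) < page_size:
            return licenses
        offset += page_size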
status code: {}'.format(status_code))\n\n domain_licenses = json_array.get(\"licenses\")\n logger.info('list_licenses request_successful: {}\\n'.format(request_successful))\n\n va_dict = {}\n # Sort licenses into lists and put them into dictionary with key = virtualAccount\n\n for lic in domain_licenses:\n # logger.info('license: {}'.format(lic))\n if lic[\"virtualAccount\"] not in va_dict.keys():\n # logger.info('license not in va_dict keys: {}'.format({lic[\"virtualAccount\"]: [lic]}))\n va_dict.update({lic[\"virtualAccount\"]: [lic]})\n else:\n va_dict[lic[\"virtualAccount\"]].append(lic)\n\n for the_virtual_account_key, licenses in va_dict.items():\n logger.info('the_virtual_account_key: {}\\n'.format(the_virtual_account_key))\n the_roles = sa.get(\"roles\")\n isFound = False\n for the_role in the_roles:\n if \"virtualAccount\" in the_role:\n if the_virtual_account_key == the_role['virtualAccount']:\n isFound = True\n the_role.update({\"licenses\": licenses})\n\n if isFound == False:\n # need to do this because of some of the wonkiness associated with the BU Production Test Domain.\n the_roles.append({'role': \"APPENDED VA USER\", \\\n \"virtualAccount\": the_virtual_account_key,\n \"licenses\": licenses})\n\n return request_successful, status_code\n\n @logger_wraps()\n def list_all_licenses(self):\n\n empty_virt_account_request_successful = False\n virt_account_request_successful = False\n status_code = -999\n\n # lazy loading of the licenses\n if self.__all_licenses == None:\n\n accounts = None\n\n # Get the list of accounts first\n request_successful, status_code, the_accounts = self.list_accounts()\n logger.info('list accounts request_successful: {}\\n'.format(request_successful))\n\n if request_successful:\n # Getting the accounts was successful Now loop through and get the virtual accounts\n\n accounts = the_accounts.get(\"accounts\")\n for sa in accounts:\n # create list of virtual accounts under Smart Account\n # first the empty virtual accounts list to try and get everything because some licenses may not\n # actually be in the virtual accounts.\n\n logger.info('Starting getting licenses with virtual accounts set to []')\n\n empty_virt_account_request_successful, status_code = self.retrieve_and_process_licenses(sa, [])\n if empty_virt_account_request_successful:\n logger.success('Empty virtual accounts\\n successful: {}\\n' \\\n ' status code: {}'.format(empty_virt_account_request_successful, status_code))\n else:\n logger.error('Empty virtual accounts\\n successful: {}\\n' \\\n ' status code: {}'.format(empty_virt_account_request_successful,\n status_code))\n\n virtual_account_list = [va.get(\"virtualAccount\") for va in sa.get(\"roles\") if va.get(\"virtualAccount\")]\n\n logger.info('Starting getting licenses with virtual accounts set as a list of virtual accounts')\n\n # next get the licenses for the virtual accounts\n if len(virtual_account_list) > 0:\n logger.info('Virtual Accounts length was greater than zero')\n virt_account_request_successful, status_code = self.retrieve_and_process_licenses(sa, virtual_account_list)\n if virt_account_request_successful:\n logger.success('Full virtual accounts\\n successful: {}\\n' \\\n 'status code: {}'.format(virt_account_request_successful, status_code))\n else:\n logger.error('Full virtual accounts\\n successful: {}\\n' \\\n 'status code: {}'.format(virt_account_request_successful, status_code))\n\n\n else:\n logger.info('Virtual Accounts length was 0')\n\n account_request_success = (empty_virt_account_request_successful or 
virt_account_request_successful)\n logger.error('account_request_success: {}, for domain: {}'.format(account_request_success,\n sa.get(\"accountDomain\")))\n\n\n\n self.__all_licenses = accounts\n\n # send back the info including whether or not things were successful. This way we can provide feedback to the user.\n # Typical reasons for problems have to do with the SSO having to be re-done. i.e. the token from CSSM needs to be\n # refreshed. TODO for CSSM to make their token system more like Webex Teams.\n\n overall_request_successful = (empty_virt_account_request_successful or virt_account_request_successful)\n logger.info(\"request_successful: {}, status code: {}\".format(overall_request_successful, status_code))\n\n return overall_request_successful, self.__all_licenses\n\n\n# This function retrieve the CSSM token from the Smart Licensing System Backend.\n@logger_wraps()\ndef retrieve_be_token(be_secret_key, roomId, personId):\n rest_verb = 'GET'\n\n t = dt.datetime.utcnow()\n\n the_time = t.strftime('%Y%m%dT%H%M%SZ')\n\n headers = {'X-SLD-Date': the_time,\n 'Content-Type': 'application/json'}\n\n token_request_url = \"{}?roomId={}&personId={}\".format(url_base, roomId, personId)\n\n # Create the signature so that the BE can verify authenticity of our request\n logger.info('{}, generating signature\\n'.format(dt.datetime.now()))\n\n the_signature = WBXTeamsBEIntegration.signature(be_secret_key, headers, rest_verb, token_request_url,\n json.dumps({}), be_service_name)\n logger.info(' signature generated\\n')\n\n headers['X-SLD-BOT-Signature'] = the_signature\n\n # Send the request\n logger.info(' sending request to BE server\\n')\n response = requests.get(token_request_url, json=json.dumps({}), headers=headers)\n logger.info(' received response from BE server\\n')\n\n the_token = \"\"\n request_successful = False\n response_dict = {}\n\n if response.status_code == 200:\n # Check the response signature to verify authenticity\n headers = {'X-SLD-Date': response.headers['X-SLD-Date'],\n 'Content-Type': 'application/json'}\n\n response_signature = response.headers['X-SLD-BE-Signature']\n response_json = None\n try:\n response_json = json.dumps(response.json())\n except:\n logger.info(\" no response body\")\n\n calculated_response_signature = WBXTeamsBEIntegration.signature(be_secret_key, headers,\n 'Response',\n \"\",\n response_json,'X-SLD-BE-Service')\n\n if response_signature == calculated_response_signature:\n logger.success('Response signature authenticated')\n request_successful = True\n response_dict = json.loads(response.text)\n the_token = response_dict['access_token']\n else:\n logger.error('Response NOT signature authenticated')\n\n\n return request_successful, response.status_code, the_token\n\n\n# We are caching the license information for a few minutes so that response time to the user is faster.\n@logger_wraps()\n@cached(license_cache)\ndef get_cssm_license(room_id, bot_token, account_credentials=\"\"):\n\n # Provide feedback to the user that fetching the info will take some time.\n please_wait_message = \"Retrieving info from the Cisco Smart Software Manager. 
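retrieve_be_token above signs requests with WBXTeamsBEIntegration.signature, which is project code not shown here. Conceptually it is an HMAC over the date header, verb, URL, and body; the sketch below shows that general style of request signing and is an assumption, not the project's actual scheme:

import hashlib
import hmac

def sign_request(secret_key: str, date_header: str, verb: str, url: str, body: str) -> str:
    # Canonicalize the request, then HMAC-SHA256 it with the shared secret.
    canonical = "\n".join([verb, url, date_header, body])
    digest = hmac.new(secret_key.encode(), canonical.encode(), hashlib.sha256)
    return digest.hexdigest()

# The receiver recomputes the signature over the same fields and compares the
# two values with hmac.compare_digest() to authenticate the message.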
This might take some time.\"\n x = threading.Thread(target=send_processing_status_message, args=(room_id, bot_token, please_wait_message))\n x.start()\n\n cssm_license = None\n request_successful = False\n\n # Retrieve the licenses\n smart_account = SmartAccountSDK(cssm_url, account_credentials)\n\n logger.info('entering smart_account.list_all_licenses()')\n request_successful, json_array = smart_account.list_all_licenses()\n\n logger.info('have exited smart_account.list_all_licenses()')\n logger.info('request_successful: {}\\n'.format(request_successful))\n\n if request_successful:\n # Retrieving the licenses from CSSM was successful. Update the user and parse the info into a CSSMLicense object\n retrieved_all_info_message = \"Retrieved all the info from the CSSM Server, just a bit more time to get your request.\"\n send_processing_status_message(room_id, bot_token, retrieved_all_info_message)\n\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n\n\n logger.info('request_successful: {}\\n, cssm_license: {}\\n'.format(request_successful, cssm_license))\n\n return request_successful, cssm_license\n\n# Generic function to provide status messages.\n@logger_wraps()\ndef send_processing_status_message(room_id, bot_token, status_message):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = {'roomId': room_id, 'markdown': status_message}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info('got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting send_please_wait was successful\")\n else:\n logger.info(\"posting send_please_wait not successful. Status code: {}\".format(\n request_response_results[1][\"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n\n# Creates the status message from a CSSMLicense object\n# The status message provides a brief summary so we limit things to the top x items. 
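get_cssm_license above is memoized with cachetools' @cached and a TTLCache, so repeated requests within five minutes skip the CSSM round trip; the positional arguments form the cache key and must be hashable. A minimal demonstration with a short TTL:

import time
from cachetools import TTLCache, cached

cache = TTLCache(maxsize=100, ttl=2)  # entries expire after 2 seconds

@cached(cache)
def slow_lookup(key: str) -> float:
    time.sleep(1)            # stands in for the CSSM round trip
    return time.monotonic()

first = slow_lookup("room-1")
assert slow_lookup("room-1") == first   # served from cache, no sleep
time.sleep(2.1)
assert slow_lookup("room-1") != first   # TTL expired, recomputed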
Whereas the normal commands\n# will provide more verbose output.\n#\n# Since we have all the info in the CSSMLicense object already, this goes quickly.\n@logger_wraps()\ndef create_license_status_message(cssm_license):\n accounts_dict = cssm_license.cssm_expired_licenses()\n\n # logger.info(\" accounts: dict = \")\n\n msg = \"**Here is the high level status of your licensing**:\\n\"\n\n # Top five expired licenses first.\n expired_dict = cssm_license.cssm_top_five_expired_licenses()\n\n if expired_dict is None:\n expired_dict = {}\n if len(expired_dict) > 0:\n msg = msg + '* [Top 5 Expired Licenses]({} \"Expired License Link\")\\n'.format(be_login_url)\n\n for accountName, virtualAccounts in expired_dict.items():\n msg = msg + ' * **{}**\\n'.format(accountName)\n\n for virtualAccount_name, licenses_dict in virtualAccounts.items():\n msg = msg + ' * {}\\n'.format(virtualAccount_name)\n\n for license_name, license_dict in licenses_dict.items():\n msg = msg + ' * {}, Qty: {}, Expired: {}\\n'.format(license_name,\n license_dict['quantity'],\n license_dict['endDate'])\n\n else:\n msg = msg + '* [There were no expired licenses]({} \"Expired License Link\")!\\n'.format(be_login_url)\n\n # Top five future-expiring licenses.\n\n future_expired_licenses = cssm_license.cssm_top_five_future_expired_licenses(expiration_days=180)\n if future_expired_licenses is None:\n # Keep the key that is read below; a bare {} would raise a KeyError.\n future_expired_licenses = {'future_expired_licenses': {}}\n logger.info('keys future_expired_licenses: {}'.format(future_expired_licenses.keys()))\n expired_dict = future_expired_licenses['future_expired_licenses']\n if len(expired_dict) > 0:\n msg = msg + '* [Top 5 Licenses Expiring in the Next 180 Days]({} \"Future Expired License Link\")\\n'.format(\n be_login_url)\n\n for accountName, virtualAccounts in expired_dict.items():\n msg = msg + ' * **{}**\\n'.format(accountName)\n\n for virtualAccount_name, licenses_dict in virtualAccounts.items():\n msg = msg + ' * {}\\n'.format(virtualAccount_name)\n\n for license_name, license_details_list in licenses_dict.items():\n if len(license_details_list) == 1:\n license_detail = license_details_list[0]\n msg = msg + ' * **{}**, Qty: {} expire on: {}\\n'.format(license_name,\n license_detail['quantity'],\n license_detail['endDate'])\n\n else:\n\n msg = msg + ' * **{}**\\n'.format(license_name)\n\n for license_detail in license_details_list:\n msg = msg + ' * Qty: {}, Expiring: {}\\n'.format(license_detail['quantity'],\n license_detail['endDate'])\n\n else:\n msg = msg + '* [There are no licenses that expire in the next 180 days]({} \"Future Expired License Link\")!\\n'.format(\n be_login_url)\n\n # Top five license shortages.\n shortage_dict = cssm_license.cssm_license_top_five_shortage()\n if shortage_dict is None:\n shortage_dict = {}\n\n if len(shortage_dict) > 0:\n msg = msg + '* [Top 5 License Shortages]({} \"License Shortage Link\")\\n'.format(be_login_url)\n\n for accountName, virtualAccounts_dict in shortage_dict.items():\n logger.info('accountName: {}'.format(accountName))\n logger.info('shortage_virtualAccounts_dict: {}'.format(virtualAccounts_dict))\n msg = msg + ' * **{}**\\n'.format(accountName)\n\n for virtualAccount_name, licenses_list in virtualAccounts_dict.items():\n msg = msg + ' * {}\\n'.format(virtualAccount_name)\n logger.info('licenses_list: {}'.format(licenses_list))\n for license_dict in licenses_list:\n logger.info('license_dict: {}'.format(license_dict))\n msg = msg + ' * {}, has a shortage of {} licenses\\n'.format(license_dict['license'],\n license_dict['shortage'])\n\n else:\n msg = msg + '* [License 
Shortage: There are no license shortages]({} \"License Shortage Link\")\\n'.format(be_login_url)\n\n # Top five licenses by usage\n usage_dict = cssm_license.cssm_top_license_usage_dict()\n if usage_dict==None:\n usage_dict={}\n if len(usage_dict) > 0:\n msg = msg + '* [Top 5 Licenses By Consumption]({} \"License Consumption Link\")\\n'.format(be_login_url)\n for accountName, virtualAccounts_dict in usage_dict.items():\n logger.info('accountName: {}'.format(accountName))\n logger.info('shortage_virtualAccounts_dict: {}'.format(virtualAccounts_dict))\n msg = msg + ' * **{}**\\n'.format(accountName)\n\n for virtualAccount_name, licenses_dict in virtualAccounts_dict.items():\n msg = msg + ' * {}\\n'.format(virtualAccount_name)\n logger.info('licenses_list: {}'.format(licenses_dict))\n for license_name, license_dict in licenses_dict.items():\n logger.info('license_dict: {}'.format(license_dict))\n msg = msg + ' * {}, has {:.1f}% utilization\\n'.format(license_name,\n license_dict['usage'])\n\n else:\n msg = msg + '* [License Usage: There are no licenses in use right now]({} \"License Consumption Link\")\\n'.format(be_login_url)\n\n\n # Architecture mix summary.\n technology_dict = cssm_license.cssm_top_license_technology_dict()\n if technology_dict==None:\n technology_dict={}\n if len(technology_dict) > 0:\n logger.info('technology_dict: {}'.format(technology_dict))\n msg = msg + '* [Here is your architecture mix, by Account]({} \"License Architecture Mix Link\")\\n'.format(be_login_url)\n for accountName, architecture_dict in technology_dict.items():\n logger.info('accountName: {}'.format(accountName))\n msg = msg + ' * **{}**\\n'.format(accountName)\n\n for architecture_name, usage_info in architecture_dict.items():\n msg = msg + ' * {}: {:.1f}%\\n'.format(architecture_name, usage_info['inUse'])\n\n else:\n msg = msg + '* [License Architecture Mix: There are no licenses in use right now]({} \"License Architecture Mix Link\")\\n'.format(be_login_url)\n\n return msg\n\n\n@logger_wraps()\ndef prepare_license_status_message(room_id, bot_token, account_credentials=\"\"):\n request_successful = False\n cssm_license = None\n\n if ARE_DEBUGGING:\n #For testing only until we can pull data from another source\n\n with open(file_name) as json_data:\n json_array = json.load(json_data)\n\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n if request_successful:\n logger.info('done getting list of licenses')\n return create_license_status_message(cssm_license)\n\n else:\n logger.info('issue with generating the license')\n return \"**There was a problem retrieving the license shortage information. Click [here]({}) to log back in.**\".format(\n be_login_url)\n\n\n# Function called by smartdashpullbot to send an overall status. 
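The send_* helpers in this module go through a post_request wrapper defined elsewhere in the project. Stripped down to the underlying Webex Teams call, posting markdown to a room looks roughly like this (the endpoint and field names match the code above; error handling is omitted):

import requests

def post_markdown(bot_token: str, room_id: str, markdown: str) -> bool:
    resp = requests.post(
        "https://api.ciscospark.com/v1/messages",
        headers={"Accept": "application/json",
                 "Content-Type": "application/json",
                 "Authorization": "Bearer {}".format(bot_token)},
        json={"roomId": room_id, "markdown": markdown},
    )
    return resp.status_code == 200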
This function calls on the prepare and create functions\n# above to get the message to package up and send to webex teams in response to a request.\n@logger_wraps()\ndef send_license_status_update(room_id, bot_token, account_credentials):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = {'roomId': room_id, 'markdown': prepare_license_status_message(room_id, bot_token, account_credentials)}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info(' got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting license_shortage was successful\")\n else:\n logger.info(\"posting license_shortage not successful. Status code: {}\".format(request_response_results[1][\n \"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n\n@logger_wraps()\ndef create_license_shortages_message(cssm_license):\n accounts_dict = cssm_license.cssm_license_shortage()\n\n msg = \"**There are no licenses shortages!!**\"\n\n if len(accounts_dict) > 0:\n\n msg = \"**Here is a list of licenses with shortages, grouped by Account and Virtual Account**:\\n\"\n for account_key in accounts_dict.keys():\n virtual_accounts = accounts_dict[account_key]\n msg = msg + \"* **{}**\\n\".format(account_key)\n\n if len(virtual_accounts) > 0:\n for virtual_account_key in virtual_accounts.keys():\n msg = msg + \" * {}\\n\".format(virtual_account_key)\n licenses = virtual_accounts[virtual_account_key]\n\n if len(licenses) > 0:\n for license in licenses:\n msg = msg + \" * There is a shortage of **{}** licenses for \\\"**{}**\\\"\\n\" \\\n .format(license['inUse'] - license['quantity'], license['license'])\n\n return msg\n\n\n@logger_wraps()\ndef prepare_license_shortage_message(room_id, bot_token, account_credentials=\"\"):\n request_successful = False\n cssm_license = None\n if ARE_DEBUGGING:\n # For testing only until we can pull data from another source\n with open(file_name) as json_data:\n json_array = json.load(json_data)\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n if request_successful:\n logger.info('done getting list of licenses')\n return create_license_shortages_message(cssm_license)\n else:\n logger.info('issue with generating the license')\n return \"**There was a problem retrieving the license shortage information. Click [here]({}) to log back in.**\".format(\n be_login_url)\n\n\n# Function called by smartdashpullbot to send information on license shortages. 
This function calls on the prepare and create functions\n# above to get the message to package up and send to webex teams in response to a request.\n@logger_wraps()\ndef send_license_shortage(room_id, bot_token, account_credentials):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = {'roomId': room_id, 'markdown': prepare_license_shortage_message(room_id, bot_token, account_credentials)}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info(' got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting license_shortage was successful\")\n else:\n logger.info(\"posting license_shortage not successful. Status code: {}\".format(request_response_results[1][\n \"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n# This function creates the message for license usage. It decides, based upon how many licenses, whether or not to\n# truncate the message and notify the calling function about whether or not they need to send an excel file to that they\n# get all the information.\n@logger_wraps()\ndef create_license_usage_message(cssm_license):\n\n msg = \"**There are no licenses being used!**\"\n\n usage_info_dict = cssm_license.cssm_license_usage_dict()\n\n limit = 0\n\n should_export_as_excel = False\n\n if len(usage_info_dict) > 0:\n usage_size = usage_info_dict['dict_size']\n usage_dict = usage_info_dict['usage_dict']\n if usage_size > 9:\n should_export_as_excel = True\n msg = '**There are a large number of licenses. Here is the license usage for the top 10 by account and virtual account**:\\n'\n else:\n msg = '**Here is the license usage by account and virtual account**: \\n'\n\n for accountName, virtualAccounts_dict in usage_dict.items():\n msg = msg + '* **{}**\\n'.format(accountName)\n\n for virtualAccount_name, licenses_dict in virtualAccounts_dict.items():\n msg = msg + ' * {}\\n'.format(virtualAccount_name)\n\n for license_name, license_dict in licenses_dict.items():\n\n msg = msg + ' * {}, has {:.1f}% utilization\\n'.format(license_name,\n license_dict['usage'])\n limit = limit +1\n if limit==10:\n msg = msg + '\\nWill export all the licenses usage info to an excel file.\\n'\n break\n if limit==10:\n break\n if limit == 10:\n break\n\n return msg, should_export_as_excel\n\n\n@logger_wraps()\ndef prepare_license_usage_message(room_id, bot_token, account_credentials=\"\"):\n request_successful = False\n cssm_license = None\n if ARE_DEBUGGING:\n # For testing only until we can pull data from another source\n with open('./usage_test_large.json') as json_data:\n json_array = json.load(json_data)\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n should_export_as_excel = False\n\n if request_successful:\n logger.info('done getting list of licenses')\n msg, should_export_as_excel = create_license_usage_message(cssm_license)\n return msg, should_export_as_excel\n else:\n logger.info('issue with generating the license')\n return \"**There was a problem retrieving license usage information. 
Click [here]({}) to log back in.**\".format(\n be_login_url), should_export_as_excel\n\n# This function creates an in memory excel file that can be packaged up into a message to Webex Teams.\n@logger_wraps()\ndef license_usage_excel_writer(cssm_df=None):\n output = io.BytesIO()\n writer = pd.ExcelWriter(output, engine='xlsxwriter', options={'remove_timezone': True})\n columns = ['accountName', 'virtualAccount', 'license', 'inUse', 'assignedLicenses_quantity','usage']\n\n cssm_df.to_excel(writer, sheet_name=\"Licenses\", startcol=0, startrow=0, columns=columns)\n writer.save()\n\n return output.getvalue()\n\n# Function called by smartdashpullbot to send license usage info. This function calls on the prepare and create functions\n# above to get the message to package up and send to webex teams in response to a request.\n@logger_wraps()\ndef send_license_usage(room_id, bot_token, account_credentials):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n msg, should_export_as_excel = prepare_license_usage_message(room_id, bot_token, account_credentials)\n\n post_data = {'roomId': room_id,\n 'markdown': msg}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n request_response_results = None\n\n # send an excel based report in case there were too many licenses to display on Teams.\n if should_export_as_excel:\n request_successful = False\n cssm_license = None\n if ARE_DEBUGGING:\n # For testing only until we can pull data from another source\n with open('./usage_test_large.json') as json_data:\n json_array = json.load(json_data)\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n\n if request_successful:\n logger.info('getting the licenses was successful')\n excel_output = license_usage_excel_writer(cssm_license.cssm_license_usage_df())\n\n filetype = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n my_fields1 = {'roomId': room_id,\n 'files': ('license_usage_export.xlsx', excel_output, filetype)}\n\n\n message_data = MultipartEncoder(fields=my_fields1)\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": message_data.content_type,\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_data=message_data)\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting license usage info was successful\")\n else:\n logger.info(\"posting license usage not successful. Status code: {}\".format(request_response_results[1][\n \"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\ndef create_license_architecture_mix_message(cssm_license):\n msg = \"**There are no licenses being used. 
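Both excel writers in this module render a DataFrame into an in-memory workbook so it can be attached to a Teams message without touching disk. A compact standalone version of that pattern (column names are illustrative; xlsxwriter must be installed):

import io
import pandas as pd

def dataframe_to_xlsx_bytes(df: pd.DataFrame) -> bytes:
    buffer = io.BytesIO()
    # The writers above also pass options={'remove_timezone': True} to xlsxwriter.
    with pd.ExcelWriter(buffer, engine="xlsxwriter") as writer:
        df.to_excel(writer, sheet_name="Licenses", index=False)
    return buffer.getvalue()

payload = dataframe_to_xlsx_bytes(pd.DataFrame({"license": ["A"], "inUse": [3]}))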
No architecture mix available!**\"\n\n tech_info_dict = cssm_license.cssm_top_license_technology_dict()\n\n\n if len(tech_info_dict) > 0:\n\n msg = '**Here is the architecture mix, by account**: \\n'\n\n for accountName, architecture_dict in tech_info_dict.items():\n msg = msg + '* **{}**\\n'.format(accountName)\n\n for architecture_name, usage_dict in architecture_dict.items():\n msg = msg + ' * {}: {:.1f}%\\n'.format(architecture_name, usage_dict['inUse'])\n\n return msg\n\n\n@logger_wraps()\ndef prepare_license_architecture_mix_message(room_id, bot_token, account_credentials=\"\", expiration_days=30):\n request_successful = False\n cssm_license = None\n if ARE_DEBUGGING:\n # For testing only until we can pull data from another source\n with open(file_name) as json_data:\n json_array = json.load(json_data)\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n if request_successful:\n logger.info('done getting list of licenses')\n return create_license_architecture_mix_message(cssm_license)\n else:\n logger.info('issue with generating the license')\n return \"**There was a problem retrieving architecture mix information. Click [here]({}) to log back in.**\".format(\n be_login_url)\n\n# Function called by smartdashpullbot to send architecture mix info. This function calls on the prepare and create functions\n# above to get the message to package up and send to webex teams in response to a request.\n@logger_wraps()\ndef send_license_architecture_mix(room_id, bot_token, account_credentials):\n\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = {'roomId': room_id,\n 'markdown': prepare_license_architecture_mix_message(room_id, bot_token, account_credentials)}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info('got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting send_license_architecture_mix info was successful\")\n else:\n logger.info(\"posting send_license_architecture_mix not successful. 
Status code: {}\".format(request_response_results[1][\n \"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n\n\n@logger_wraps()\ndef create_future_expired_licenses_message(cssm_license, expiration_days=30):\n expired_license_dict = cssm_license.cssm_future_expired_licenses(expiration_days=expiration_days)\n\n msg = \"**There are no licenses that expire in {} days!!**\"\n\n accounts_dict = expired_license_dict['future_expired_licenses']\n if len(accounts_dict) > 0:\n\n license_quantity = expired_license_dict['quantity']\n\n if license_quantity == 1:\n msg = \"**There is _*{}*_ license\".format(license_quantity)\n else:\n msg = \"**There are _*{}*_ licenses\".format(license_quantity)\n\n msg = msg + \" that will expire in {} days:**:\\n\".format(expiration_days)\n\n for account_key, virtual_accounts in accounts_dict.items():\n\n msg = msg + \"* **{}**\\n\".format(account_key)\n\n if len(virtual_accounts) > 0:\n for virtual_account_key, licenses in virtual_accounts.items():\n msg = msg + \" * {}\\n\".format(virtual_account_key)\n\n for license_key, license_details in licenses.items():\n if len(license_details) == 1:\n the_detail = license_details[0]\n msg = msg + \" * **{}**, Qty: {} expire on: {}\\n\".format(license_key,\n the_detail['quantity'],\n the_detail['endDate'])\n else:\n msg = msg + \" * **{}**\\n\".format(license_key)\n for license_detail in license_details:\n msg = msg + \" * Qty: {} expire on: {}\\n\".format(license_detail['quantity'],\n license_detail['endDate'])\n\n return msg.format(expiration_days)\n\n@logger_wraps()\ndef prepare_future_expired_licenses_message(room_id, bot_token, account_credentials=\"\", expiration_days=30):\n request_successful = False\n cssm_license = None\n if ARE_DEBUGGING:\n # For testing only until we can pull data from another source\n with open(file_name) as json_data:\n json_array = json.load(json_data)\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n if request_successful:\n logger.info('done getting list of licenses')\n return create_future_expired_licenses_message(cssm_license, expiration_days=expiration_days)\n else:\n logger.info('issue with generating the license')\n return \"**There was a problem retrieving expired licenses information. 
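The future-expiry reports above hinge on whether a license's endDate falls within the next N days. A sketch of that date filter, assuming an ISO-style date string (the real endDate format comes from CSSM and may differ):

import datetime as dt

def expires_within(end_date: str, days: int, fmt: str = "%Y-%m-%d") -> bool:
    """True when end_date falls between today and `days` days from now."""
    end = dt.datetime.strptime(end_date, fmt).date()
    today = dt.date.today()
    return today <= end <= today + dt.timedelta(days=days)

# e.g. expires_within("2030-01-01", 180) is False until mid-2029.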
Click [here]({}) to log back in.**\".format(\n be_login_url)\n\n# Function called by smartdashpullbot to send information about licenses that will expire in X number of days info.\n# This function calls on the prepare and create functions above to get the message to package up and send to webex\n# teams in response to a request.\n@logger_wraps()\ndef send_future_expired_licenses(room_id, bot_token, account_credentials, expiration_days=30):\n logger.info('send_thirty_expired_licenses start')\n\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = {'roomId': room_id, 'markdown': prepare_future_expired_licenses_message(room_id, bot_token, account_credentials,\n expiration_days=expiration_days)}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info('got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting license expiration info was successful\")\n else:\n logger.info(\"posting license expiration not successful. Status code: {}\".format(request_response_results[1][\n \"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n\n@logger_wraps()\ndef create_expired_licenses_message(cssm_license):\n accounts_dict = cssm_license.cssm_expired_licenses()\n logger.info(accounts_dict)\n msg = \"**There are no expired licenses!!**\"\n\n if len(accounts_dict) > 0:\n\n msg = \"**Here are the *expired licenses*, grouped by Account and Virtual Account**:\\n\"\n for account_key in accounts_dict.keys():\n virtual_accounts = accounts_dict[account_key]\n msg = msg + \"* **{}**\\n\".format(account_key)\n\n if len(virtual_accounts) > 0:\n for virtual_account_key in virtual_accounts.keys():\n msg = msg + \" * {}\\n\".format(virtual_account_key)\n licenses = virtual_accounts[virtual_account_key]\n\n if len(licenses) > 0:\n for license_name, license_info in licenses.items():\n end_date = license_info['endDate']\n msg = msg + \" * {}, Qty: {}, expires: {}\\n\".format(license_name,\n license_info['quantity'],\n end_date)\n\n return msg\n\n\n@logger_wraps()\ndef prepare_expired_licenses_message(room_id, bot_token, account_credentials=\"\"):\n # For testing only until we can pull data from another source\n request_successful = False\n cssm_license = None\n if ARE_DEBUGGING:\n # For testing only until we can pull data from another source\n with open(file_name) as json_data:\n json_array = json.load(json_data)\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n if request_successful:\n logger.info('done getting list of licenses')\n return create_expired_licenses_message(cssm_license)\n else:\n logger.info('issue with generating the license')\n return \"**There was a problem retrieving expired licenses information. Click [here]({}) to log back in.**\".format(\n be_login_url)\n\n# Function called by smartdashpullbot to send expired license info. 
This function calls on the prepare and create functions\n# above to get the message to package up and send to webex teams in response to a request.\n@logger_wraps()\ndef send_expired_licenses(room_id, bot_token, account_credentials):\n logger.info('send_expired_licenses start')\n\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = {'roomId': room_id, 'markdown': prepare_expired_licenses_message(room_id, bot_token, account_credentials)}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info('got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting account_names was successful\")\n else:\n logger.info(\"posting account_names not successful. Status code: {}\".format(request_response_results[1][\n \"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n# in memory excel writer. This could be refactored with the other excel writer function above.\n@logger_wraps()\ndef license_excel_writer(cssm_df=None):\n output = io.BytesIO()\n writer = pd.ExcelWriter(output, engine='xlsxwriter', options={'remove_timezone': True})\n columns = ['accountName', 'accountDomain', 'accountStatus', 'accountType', 'role', 'virtualAccount',\n 'virtualAccount_status', 'statusMessage', 'license', 'assignedLicenses_quantity', 'inUse', 'available',\n 'ahaApps', 'billingType', 'pendingQuantity', 'reserved', 'isPortable', 'assignedLicenses_status',\n 'licenseType', 'quantity', 'startDate', 'endDate', 'subscriptionId', 'status']\n\n cssm_df.to_excel(writer, sheet_name=\"Licenses\", startcol=0, startrow=0, columns=columns)\n writer.save()\n\n return output.getvalue()\n\n# Function called by smartdashpullbot to send an export of all the license info. 
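send_license_export below attaches the generated workbook with requests_toolbelt's MultipartEncoder, since the Webex messages endpoint takes file uploads as multipart form data. The essential call, without the project's post_request wrapper (the filename is illustrative):

import requests
from requests_toolbelt import MultipartEncoder

def post_file(bot_token: str, room_id: str, data: bytes, filename: str) -> int:
    xlsx = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    encoder = MultipartEncoder(fields={
        "roomId": room_id,
        "files": (filename, data, xlsx),
    })
    resp = requests.post(
        "https://api.ciscospark.com/v1/messages",
        data=encoder,
        headers={"Content-Type": encoder.content_type,
                 "Authorization": "Bearer {}".format(bot_token)},
    )
    return resp.status_code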
This function calls on the prepare\n# and create functions above to get the message to package up and send to webex teams in response to a request.\n@logger_wraps()\ndef send_license_export(room_id, bot_token, account_credentials):\n\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n my_fields1 = {'roomId': room_id}\n\n request_successful = False\n cssm_license = None\n if ARE_DEBUGGING:\n # For testing only until we can pull data from another source\n with open(file_name) as json_data:\n json_array = json.load(json_data)\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n\n request_response_results = None\n if request_successful:\n logger.info('getting the licenses was successful')\n excel_output = license_excel_writer(cssm_license.cssm_dataframe)\n msg = '**Here is the license export that you requested!**'\n\n filetype = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n\n my_fields1['files'] = ('license_export.xlsx', excel_output, filetype)\n my_fields1['markdown'] = msg\n\n message_data = MultipartEncoder(fields=my_fields1)\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": message_data.content_type,\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_data=message_data)\n else:\n my_fields1['markdown'] = \"**There was a problem pulling the info for your license export request. \" \\\n \"Click [here]({}) to log back in.**\".format(be_login_url)\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=my_fields1)\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting account_names was successful\")\n else:\n logger.info(\"posting account_names not successful. Status code: {}\".format(request_response_results[1][\n \"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n\n@logger_wraps()\ndef create_virtual_accounts_message(cssm_license):\n accounts_dict = cssm_license.cssm_virt_account_by_accountName()\n\n msg = \"**Here are the *virtual accounts*, grouped by Account**:\\n\"\n for account in accounts_dict.keys():\n msg = msg + \"* **{}**\\n\".format(account)\n for virtual_account in accounts_dict[account]:\n msg = msg + \" * {}\\n\".format(virtual_account)\n\n return msg\n\n\n@logger_wraps()\ndef prepare_virtual_accounts_message(room_id, bot_token, account_credentials=\"\"):\n request_successful = False\n cssm_license = None\n if ARE_DEBUGGING:\n # For testing only until we can pull data from another source\n with open(file_name) as json_data:\n json_array = json.load(json_data)\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n if request_successful:\n logger.info('done getting list of licenses')\n return create_virtual_accounts_message(cssm_license)\n else:\n return \"**There was a problem retrieving the virtual accounts information. 
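# ---------------------------------------------------------------------------
# [Illustrative sketch -- not part of the original module.] Attaching the
# spreadsheet in send_license_export above relies on requests_toolbelt's
# MultipartEncoder so the multipart boundary ends up in the Content-Type
# header. Reduced to its essentials (room_id/bot_token are placeholders):
import requests
from requests_toolbelt import MultipartEncoder

XLSX_TYPE = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"

def post_file_to_webex(room_id, bot_token, filename, payload_bytes):
    encoder = MultipartEncoder(fields={
        "roomId": room_id,
        "markdown": "**Here is your export.**",
        "files": (filename, payload_bytes, XLSX_TYPE),
    })
    return requests.post("https://api.ciscospark.com/v1/messages",
                         data=encoder,
                         headers={"Content-Type": encoder.content_type,
                                  "Authorization": "Bearer {}".format(bot_token)})
# ---------------------------------------------------------------------------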
Click [here]({}) to log back in.**\".format(\n be_login_url)\n\n# Function called by smartdashpullbot to send virtual accounts info. This function calls on the prepare and create functions\n# above to get the message to package up and send to webex teams in response to a request.\n@logger_wraps()\ndef send_virtual_accounts(room_id, bot_token, account_credentials):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = {'roomId': room_id, 'markdown': prepare_virtual_accounts_message(room_id, bot_token, account_credentials)}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info('got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting account_names was successful\")\n else:\n logger.info(\"posting account_names not successful. Status code: {}\".format(request_response_results[1][\n \"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n\n@logger_wraps()\ndef create_account_names_message(cssm_license):\n logger.info('create_account_names_message start')\n\n account_names = cssm_license.account_names()\n logger.info('done retrieving account names')\n\n msg = \"Sorry, there aren't any accounts for your credentials!\"\n if len(account_names) > 0:\n msg = \"**Here are the requested accounts:**\\n\\n\"\n for name in account_names:\n msg = msg + '* {}\\n'.format(name)\n logger.info('create_account_names_message end')\n return msg\n\n\n@logger_wraps()\ndef prepare_account_names_message(room_id, bot_token, account_credentials=\"\"):\n request_successful = False\n cssm_license = None\n if ARE_DEBUGGING:\n # For testing only until we can pull data from another source\n with open(file_name) as json_data:\n json_array = json.load(json_data)\n parser = cssm_parser.CSSMJSONParser(json_array)\n cssm_license = parser.cssm_license()\n request_successful = True\n else:\n request_successful, cssm_license = get_cssm_license(room_id, bot_token, account_credentials)\n\n if request_successful:\n logger.info('done getting list of licenses')\n # print('{}, the json_array: {}'.format(json_array))\n logger.info('{}, prepare_account_names_message end'.format(dt.datetime.now()))\n return create_account_names_message(cssm_license)\n else:\n logger.info('could not get list of licenses')\n return \"**There was a problem retrieving the account names information. Click [here]({}) to log back in.**\".format(\n be_login_url)\n\n# Function called by smartdashpullbot to send smart account info. 
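# ---------------------------------------------------------------------------
# [Refactoring sketch -- not code from the original module.]
# send_expired_licenses and send_virtual_accounts above (and
# send_account_names just below) are identical apart from the prepare_*
# function they call. One way to collapse the duplication is a single helper
# that takes the prepared markdown; it reuses post_request from this module:
def send_markdown(room_id, bot_token, markdown):
    return post_request(
        "https://api.ciscospark.com/v1/messages",
        post_headers={"Accept": "application/json",
                      "Content-Type": "application/json",
                      "Authorization": "Bearer {}".format(bot_token)},
        post_json={"roomId": room_id, "markdown": markdown})

# e.g. send_markdown(room_id, bot_token, prepare_virtual_accounts_message(...))
# ---------------------------------------------------------------------------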
This function calls on the prepare and create functions\n# above to get the message to package up and send to webex teams in response to a request.\n@logger_wraps()\ndef send_account_names(room_id, bot_token, account_credentials):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = {'roomId': room_id, 'markdown': prepare_account_names_message(room_id, bot_token, account_credentials)}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info('got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting account_names was successful\")\n else:\n logger.info(\"posting account_names not successful. Status code: {}\".format(request_response_results[1][\n \"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n# Pretty self explanatory. Put together the message for when a user types hello or help.\n@logger_wraps()\ndef prepare_hello_message():\n msg = '**Howdy, this is the Cisco Smart Licensing Dashboard Bot. What can I do for you?**\\n\\nThese are the things you can do:\\n'\n msg = msg + '* \\'show me the latest status\\' or \\'status\\' or \\'give me a status update\\'\\n' \\\n '* **Account Related:**\\n' \\\n ' * \\'give me a list of account names\\'\\n' \\\n ' * \\'give me a list of virtual accounts\\'\\n' \\\n '* **Licensing:**\\n' \\\n ' * \\'give me an export of licenses\\' or \\'export licenses\\'\\n' \\\n ' * \\'show me license usage\\' or \\'license usage\\' or \\'usage\\'\\n' \\\n ' * \\'show me the architecture mix\\' or \\'architecture mix\\'\\n' \\\n '* **Licensing Issues:**\\n' \\\n ' * **Expired Licenses Info**\\n' \\\n ' * \\'give me a list of expired licenses\\' or \\'expired licenses\\'\\n' \\\n ' * \\'show me licenses that expire in 30 days\\' or \\'expire 30 days\\' or \\'expire 30\\'\\n' \\\n ' * \\'show me licenses that expire in 60 days\\' or \\'expire 60 days\\' or \\'expire 60\\'\\n' \\\n ' * \\'show me licenses that expire in 90 days\\' or \\'expire 90 days\\' or \\'expire 90\\'\\n' \\\n ' * \\'show me licenses that expire in 180 days\\' or \\'expire 180 days\\' or \\'expire 180\\'\\n' \\\n ' * \\'show me licenses with shortages\\' or \\'license shortage list\\'\\n'\n return msg\n\n# send a message to the user that authentication failed.\n@logger_wraps()\ndef send_authentication_back(room_id, bot_token):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n msg = \"**There was a problem retrieving the account names information. Click [here]({}) to log back in.**\".format(be_login_url)\n\n post_data = {'roomId': room_id, 'markdown': msg}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info('got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting problem back was successful\")\n else:\n logger.info(\"posting problem back not successful. 
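# ---------------------------------------------------------------------------
# [Alternative sketch -- not code from the original module.]
# prepare_hello_message above assembles the help menu from one long string
# literal, so adding a command means editing markdown by hand. Keeping the
# commands in a data structure makes that a one-line change (section names
# abbreviated here for brevity):
HELP_MENU = {
    "Account Related": ["give me a list of account names",
                        "give me a list of virtual accounts"],
    "Licensing": ["export licenses", "license usage", "architecture mix"],
}

def build_help_message(menu):
    lines = ["**Howdy, what can I do for you?**", ""]
    for section, commands in menu.items():
        lines.append("* **{}:**".format(section))
        lines.extend("  * '{}'".format(command) for command in commands)
    return "\n".join(lines)
# ---------------------------------------------------------------------------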
Status code: {}\".format(\n request_response_results[1][\"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n\n# Sent to the user when we don't understand what they were asking for.\n@logger_wraps()\ndef send_problem_back(room_id, bot_token):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n msg = 'Hi, sorry I did not understand your request. Try typing \\'help\\' to get a list of commands.'\n\n post_data = {'roomId': room_id, 'markdown': msg}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info('got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting problem back was successful\")\n else:\n logger.info(\"posting problem back not successful. Status code: {}\".format(\n request_response_results[1][\"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n\n@logger_wraps()\ndef send_hello_back(room_id, bot_token):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = {'roomId': room_id, 'markdown': prepare_hello_message()}\n\n logger.info('starting post')\n request_response_results = post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(bot_token)},\n post_json=post_data)\n logger.info('got response from post')\n\n request_response_is_successful = request_response_results[0]\n date_time = dt.datetime.now()\n if request_response_is_successful:\n logger.info(\"posting hello back was successful\")\n else:\n logger.info(\"posting hello back not successful. 
Status code: {}\".format(\n request_response_results[1][\"error_key\"]))\n logger.info(\"response from server: {}\".format(request_response_results[1][\"response_json_key\"]))\n\n# Basic function that does the heavy lifting of sending messages and files to Webex Teams.\n@logger_wraps()\ndef post_request(url, post_headers, post_data=None, post_json=None):\n spark_request = None\n if post_data:\n spark_request = requests.post(url,\n data=post_data,\n headers=post_headers)\n elif json:\n spark_request = requests.post(url, json=post_json, headers=post_headers)\n else:\n return [False, {\"error_key\": \"No json or data payload\"}]\n\n if spark_request.status_code == 200:\n return [True, {}]\n else:\n return [False, {\"error_key\": spark_request.status_code,\n \"response_json_key\": json.loads(spark_request.text)}]\n","repo_name":"CiscoSE/Smart-Licensing-Dashboard","sub_path":"WebexTeams/sld_pullbot_function.py","file_name":"sld_pullbot_function.py","file_ext":"py","file_size_in_byte":63287,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"41068460919","text":"\"\"\"\nJob Context\n\"\"\"\nimport utils.spark_utils as su\n\n\nclass JobContext(object):\n \"\"\"\n Provides a context to access frequently used objects/properties within \n an ETL workflow.\n\n Methods:\n __init__ - class constructor\n __enter__ - method to establish context\n __exit__ - method to gracefully exit context\n\n \"\"\"\n\n def __init__(self, app_name, environment, snapshot_dt):\n \"\"\"\n Class Constructor\n\n :param app_name: The name of the Spark Application\n :param environment: The environment the application will run in\n :snapshot_dt: The datetime when the application was run, which will\n dictate the lowest partition \n \"\"\"\n self.app_name = app_name\n self.environment = environment\n self.spark = su.get_spark(app_name)\n self.logger = su.get_logger(self.spark)\n self.snapshot_dt = snapshot_dt\n\n # Environment-specific variables will be set here\n self.bucket = \"fsd-test-1\" # Testing bucket\n\n def __enter__(self):\n \"\"\"Method to establish context\"\"\"\n # print(\"Executing snapshot date {}\".format(self.snapshot_dt))\n self.logger.info(\n \"Executing {} {} for snapshot date {}...\".format(\n self.environment, self.app_name, self.snapshot_dt))\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"\n Method to gracefully exit context\n\n :param exc_type: Exception Type\n :param exc_val: Exception Value\n :param exc_tb: Exception Tracback\n \"\"\"\n if exc_type:\n self.spark.stop()\n # print(\"Terminating run for snapshot date {}\".format(self.snapshot_dt))\n self.logger.info(\"Terminating run {} {} for snapshot date {}\"\n \" with ERROR {}: {}\".format(self.environment, self.app_name,\n self.snapshot_dt, exc_type.__name__, exc_val))\n else:\n self.spark.stop()\n # print(\"Finished run for snapshot date {}\".format(self.snapshot_dt))\n self.logger.info(\"Finished run {} {} for snapshot date {}\".format(\n self.environment, self.app_name, self.snapshot_dt))\n","repo_name":"JStolt/scintilla","sub_path":"context_managers/job_context.py","file_name":"job_context.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6440453983","text":"#!/usr/bin/env python3\r\n# coding: shift-jis\r\n\r\nimport redis\r\nimport shutil\r\nimport os\r\nimport datetime\r\nfrom setenv import RedisPwd\r\n\r\n### Redis info\r\nRedisHost = 
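# ---------------------------------------------------------------------------
# [Hedged note on post_request above.] `elif json:` tests the imported json
# *module*, which is always truthy, so the "No json or data payload" branch
# is unreachable; the condition was presumably meant to test the post_json
# argument. A corrected, self-contained sketch:
import json
import requests

def post_request_fixed(url, post_headers, post_data=None, post_json=None):
    if post_data is not None:
        resp = requests.post(url, data=post_data, headers=post_headers)
    elif post_json is not None:
        resp = requests.post(url, json=post_json, headers=post_headers)
    else:
        return [False, {"error_key": "No json or data payload"}]
    if resp.status_code == 200:
        return [True, {}]
    return [False, {"error_key": resp.status_code,
                    "response_json_key": json.loads(resp.text)}]
# ---------------------------------------------------------------------------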
\"redis-13849.c9.us-east-1-4.ec2.cloud.redislabs.com\" \r\nRedisPort = \"13849\"\r\n\r\n### Redis connect\r\nr = redis.Redis(host=RedisHost, port=RedisPort, password=RedisPwd, db=0)\r\nr = redis.StrictRedis(host=RedisHost, port=RedisPort, password=RedisPwd, db=0)\r\n\r\n### Redis data get\r\nwith open('result-data.txt', 'wt') as f: # File\r\n res_keys = r.keys() # key\r\n if res_keys:\r\n res_mget = r.mget(res_keys) # mget\r\n for key, val in zip(res_keys, res_mget):\r\n print(val, file=f)\r\n\r\n### reset vars\r\nncount = 0\r\n\r\n### format data\r\nf = open('result-data.txt', 'r')\r\nline = f.readline()\r\nwhile line:\r\n dline = line\r\n rline = dline.replace('\\'', '')\r\n rline2 = rline.replace('b', '')\r\n rline3 = rline2.replace('\\n', '')\r\n fline = float(rline3)\r\n if fline < 0.6:\r\n ncount += 1\r\n line = f.readline()\r\nf.close()\r\n\r\n### set website file\r\npath1 = \"C:\\\\temp\\\\p4p\\\\project\\\\mysite\\\\defaultpage.txt\"\r\npath2 = \"C:\\\\temp\\\\p4p\\\\project\\\\mysite\\\\index.html\"\r\n\r\n### reset website file \r\nshutil.copy(path1,path2)\r\nncount = ncount*10\r\nsitcount = datetime.timedelta(seconds=ncount)\r\n\r\n### update website file\r\nwith open(\"C:\\\\temp\\\\p4p\\\\project\\\\mysite\\\\index.html\", \"r\",encoding='UTF-8') as f2:\r\n filedata = f2.read()\r\n filedata=filedata.replace(\"XXX\", str(sitcount))\r\n if ncount < 18000:\r\n filedata=filedata.replace(\"MMMMM\",\"時々立ち上がってストレッチしましょう。\")\r\n else:\r\n filedata=filedata.replace(\"MMMMM\",\"長時間座っているため気を付けましょう。\")\r\nwith open(r\"C:\\\\temp\\\\p4p\\\\project\\\\mysite\\\\index.html\",\"w\",encoding='UTF-8') as f3:\r\n f3.write(filedata)\r\n\r\n### update heroku\r\nos.chdir('mysite')\r\nos.system('git add .')\r\nos.system('git commit -m \"auto\"')\r\nos.system('git push heroku master')","repo_name":"kodtanactc/kodpersonal","sub_path":"get-data.py","file_name":"get-data.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4015747072","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\napi_data.py\n\nA python program to selectively interrogate the GW1100/GW2000 Wi-Fi Gateway API.\n\"\"\"\n\n# Python imports\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport socket\nimport struct\nimport time\n\n# Python 2/3 compatibility shims\nimport six\n\n\nVERSION = '0.1.0'\n\n# various defaults used throughout\n# default port used by GW1000/GW1100\ndefault_port = 45000\n# default network broadcast address - the address that network broadcasts are\n# sent to\ndefault_broadcast_address = '255.255.255.255'\n# default network broadcast port - the port that network broadcasts are sent to\ndefault_broadcast_port = 46000\n# default socket timeout\ndefault_socket_timeout = 2\n# default broadcast timeout\ndefault_broadcast_timeout = 5\n# default retry/wait time\ndefault_retry_wait = 10\n# default max tries when polling the API\ndefault_max_tries = 3\n# When run as a service the default age in seconds after which GW1000/GW1100\n# API data is considered stale and will not be used to augment loop packets\ndefault_max_age = 60\n# default device poll interval\ndefault_poll_interval = 20\n# default period between lost contact log entries during an extended period of\n# lost contact when run as a Service\ndefault_lost_contact_log_period = 21600\n# default battery state filtering\ndefault_show_battery = False\n# my API command vocabulary\ncommands 
= {\n 'CMD_WRITE_SSID': b'\\x11',\n 'CMD_BROADCAST': b'\\x12',\n 'CMD_READ_ECOWITT': b'\\x1E',\n 'CMD_WRITE_ECOWITT': b'\\x1F',\n 'CMD_READ_WUNDERGROUND': b'\\x20',\n 'CMD_WRITE_WUNDERGROUND': b'\\x21',\n 'CMD_READ_WOW': b'\\x22',\n 'CMD_WRITE_WOW': b'\\x23',\n 'CMD_READ_WEATHERCLOUD': b'\\x24',\n 'CMD_WRITE_WEATHERCLOUD': b'\\x25',\n 'CMD_READ_STATION_MAC': b'\\x26',\n 'CMD_GW1000_LIVEDATA': b'\\x27',\n 'CMD_GET_SOILHUMIAD': b'\\x28',\n 'CMD_SET_SOILHUMIAD': b'\\x29',\n 'CMD_READ_CUSTOMIZED': b'\\x2A',\n 'CMD_WRITE_CUSTOMIZED': b'\\x2B',\n 'CMD_GET_MulCH_OFFSET': b'\\x2C',\n 'CMD_SET_MulCH_OFFSET': b'\\x2D',\n 'CMD_GET_PM25_OFFSET': b'\\x2E',\n 'CMD_SET_PM25_OFFSET': b'\\x2F',\n 'CMD_READ_SSSS': b'\\x30',\n 'CMD_WRITE_SSSS': b'\\x31',\n 'CMD_READ_RAINDATA': b'\\x34',\n 'CMD_WRITE_RAINDATA': b'\\x35',\n 'CMD_READ_GAIN': b'\\x36',\n 'CMD_WRITE_GAIN': b'\\x37',\n 'CMD_READ_CALIBRATION': b'\\x38',\n 'CMD_WRITE_CALIBRATION': b'\\x39',\n 'CMD_READ_SENSOR_ID': b'\\x3A',\n 'CMD_WRITE_SENSOR_ID': b'\\x3B',\n 'CMD_READ_SENSOR_ID_NEW': b'\\x3C',\n 'CMD_WRITE_REBOOT': b'\\x40',\n 'CMD_WRITE_RESET': b'\\x41',\n 'CMD_WRITE_UPDATE': b'\\x43',\n 'CMD_READ_FIRMWARE_VERSION': b'\\x50',\n 'CMD_READ_USR_PATH': b'\\x51',\n 'CMD_WRITE_USR_PATH': b'\\x52',\n 'CMD_GET_CO2_OFFSET': b'\\x53',\n 'CMD_SET_CO2_OFFSET': b'\\x54',\n 'CMD_READ_RSTRAIN_TIME': b'\\x55',\n 'CMD_WRITE_RSTRAIN_TIME': b'\\x56',\n 'CMD_READ_RAIN': b'\\x57',\n 'CMD_WRITE_RAIN': b'\\x58'\n}\n# header used in each API command and response packet\nheader = b'\\xff\\xff'\n# known device models\nknown_models = ('GW1000', 'GW1100', 'GW2000', 'WH2650', 'WH2850', 'WN1900')\n\nmanifest = ['CMD_READ_SSSS', 'CMD_READ_SENSOR_ID_NEW', 'CMD_READ_RAINDATA', 'CMD_READ_RSTRAIN_TIME', 'CMD_READ_RAIN']\n\n\ndef hex_to_bytes(hex_string):\n \"\"\"Takes a string of hex character pairs and returns a string of bytes.\n\n Allows us to specify a byte string in a little more human readable format.\n Takes a space delimited string of hex pairs and converts to a string of\n bytes. 
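# ---------------------------------------------------------------------------
# [Worked example -- follows the framing used by discover/send_cmd below;
# Python 3 assumed for sum() over bytes.] Every packet is the 0xFF 0xFF
# header, the command byte, a size byte (command + size + checksum = 3 for
# payload-less commands), then a checksum over command+size. For
# CMD_READ_FIRMWARE_VERSION (0x50): size = 0x03 and
# checksum = (0x50 + 0x03) % 256 = 0x53, giving a five-byte packet:
cmd = commands['CMD_READ_FIRMWARE_VERSION']            # b'\x50'
body = b''.join([cmd, struct.pack('B', len(cmd) + 2)])  # b'\x50\x03'
packet = b''.join([header, body, struct.pack('B', sum(body) % 256)])
assert packet == b'\xff\xff\x50\x03\x53'
# ---------------------------------------------------------------------------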
hex_string pairs must be spaced delimited, eg 'AB 2E 3B'.\n\n If we only ran under python3 we could use bytes.fromhex(), but we need to\n cater for python2 as well so use struct.pack.\n \"\"\"\n\n # first get our hex string as a list of integers\n dec_list = [int(a, 16) for a in hex_string.split()]\n # now pack them in a sequence of bytes\n return struct.pack('B' * len(dec_list), *dec_list)\n\n\ndef bytes_to_hex(iterable, separator=' ', caps=True):\n \"\"\"Produce a hex string representation of a sequence of bytes.\"\"\"\n\n # assume 'iterable' can be iterated by iterbytes and the individual\n # elements can be formatted with {:02X}\n format_str = \"{:02X}\" if caps else \"{:02x}\"\n try:\n return separator.join(format_str.format(c) for c in six.iterbytes(iterable))\n except ValueError:\n # most likely we are running python3 and iterable is not a bytestring,\n # try again coercing iterable to a bytestring\n return separator.join(format_str.format(c) for c in six.iterbytes(six.b(iterable)))\n except (TypeError, AttributeError):\n # TypeError - 'iterable' is not iterable\n # AttributeError - likely because separator is None\n # either way we can't represent as a string of hex bytes\n return \"cannot represent '%s' as hexadecimal bytes\" % (iterable,)\n\n\ndef calc_checksum(data):\n \"\"\"Calculate the checksum for an API call or response.\"\"\"\n\n # initialise the checksum to 0\n checksum = 0\n # iterate over each byte in the response\n for b in six.iterbytes(data):\n # add the byte to the running total\n checksum += b\n # we are only interested in the least significant byte\n return checksum % 256\n\n\ndef discover():\n \"\"\"Discover any gateway devices on the local network.\"\"\"\n\n # create a socket object so we can broadcast to the network via\n # IPv4 UDP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # set socket datagram to broadcast\n s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n # set timeout\n s.settimeout(default_broadcast_timeout)\n # set TTL to 1 to so messages do not go past the local network\n # segment\n ttl = struct.pack('b', 1)\n s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)\n cmd_code = commands['CMD_BROADCAST']\n size = len(cmd_code) + 2\n body = b''.join([cmd_code, struct.pack('B', size)])\n checksum = calc_checksum(body)\n cmd_packet = b''.join([header, body, struct.pack('B', checksum)])\n print(\"%s (%s):\" % ('CMD_BROADCAST', bytes_to_hex(cmd_code)))\n print(\" Sending broadcast packet '%s' to '%s:%d'\" % (bytes_to_hex(cmd_packet),\n default_broadcast_address,\n default_broadcast_port))\n # initialise a list for the results as multiple GW1000/GW1100 may\n # respond\n result_list = []\n # send the Broadcast command\n s.sendto(cmd_packet, (default_broadcast_address, default_broadcast_port))\n # obtain any responses\n while True:\n try:\n response = s.recv(1024)\n # print the response if debug is high enough\n print(\" Received broadcast response '%s'\" % (bytes_to_hex(response),))\n except socket.timeout:\n # if we timeout then we are done\n break\n except socket.error:\n # raise any other socket error\n raise\n else:\n try:\n check_response(response, commands['CMD_BROADCAST'])\n except Exception as e:\n # Some other error occurred in check_response(),\n # perhaps the response was malformed. 
Log the stack\n # trace but continue.\n print(\" Unexpected exception occurred while checking response \"\n \"to command '%s': %s\" % ('CMD_BROADCAST', e))\n else:\n # we have a valid response so decode the response\n # and obtain a dict of device data\n device = decode_broadcast_response(response)\n # if we haven't seen this MAC before attempt to obtain\n # and save the device model then add the device to our\n # results list\n if not any((d['mac'] == device['mac']) for d in result_list):\n result_list.append(device)\n # close our socket\n s.close()\n # now return our results\n return result_list\n\n\ndef decode_broadcast_response(raw_data):\n \"\"\"Decode a broadcast response and return the results as a dict.\"\"\"\n\n # obtain the response size, it's a big endian short (two byte)\n # integer\n resp_size = struct.unpack('>H', raw_data[3:5])[0]\n # now extract the actual data payload\n data = raw_data[5:resp_size + 2]\n # initialise a dict to hold our result\n data_dict = dict()\n # extract and decode the MAC address\n data_dict['mac'] = bytes_to_hex(data[0:6], separator=\":\")\n # extract and decode the IP address\n data_dict['ip_address'] = '%d.%d.%d.%d' % struct.unpack('>BBBB',\n data[6:10])\n # extract and decode the port number\n data_dict['port'] = struct.unpack('>H', data[10: 12])[0]\n # get the SSID as a bytestring\n ssid_b = data[13:]\n # create a format string so the SSID string can be unpacked into its\n # bytes, remember the length can vary\n ssid_format = \"B\" * len(ssid_b)\n # unpack the SSID bytestring, we now have a tuple of integers\n # representing each of the bytes\n ssid_t = struct.unpack(ssid_format, ssid_b)\n # convert the sequence of bytes to unicode characters and assemble\n # as a string and return the result\n data_dict['ssid'] = \"\".join([chr(x) for x in ssid_t])\n # return the result dict\n return data_dict\n\n\ndef check_response(response, cmd_code):\n \"\"\"Check the validity of an API response.\"\"\"\n\n # first check that the 3rd byte of the response is the command code\n # that was issued\n if six.indexbytes(response, 2) == six.byte2int(cmd_code):\n # now check the checksum\n c_checksum = calc_checksum(response[2:-1])\n resp_checksum = six.indexbytes(response, -1)\n if c_checksum == resp_checksum:\n # checksum check passed, response is deemed valid\n return\n else:\n # checksum check failed, raise an InvalidChecksum exception\n _msg = \" Invalid checksum in API response. \" \\\n \"Expected '%s' (0x%s), received '%s' (0x%s).\" % (calc_checksum,\n \"{:02X}\".format(c_checksum),\n resp_checksum,\n \"{:02X}\".format(resp_checksum))\n else:\n # command code check failed, raise an InvalidApiResponse\n # exception\n exp_int = six.byte2int(cmd_code)\n resp_int = six.indexbytes(response, 2)\n _msg = \" Invalid command code in API response. 
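# ---------------------------------------------------------------------------
# [Hedged note on check_response above.] Both failure branches build `_msg`
# but never raise or return it (and the checksum branch interpolates the
# calc_checksum *function* instead of the computed value), so malformed
# responses pass silently. A sketch that actually signals failure, reusing
# the helpers defined above (the exception name comes from the comments in
# check_response but is otherwise an assumption):
class InvalidApiResponse(Exception):
    pass

def check_response_strict(response, cmd_code):
    if six.indexbytes(response, 2) != six.byte2int(cmd_code):
        raise InvalidApiResponse("unexpected command code 0x{:02X}".format(
            six.indexbytes(response, 2)))
    if calc_checksum(response[2:-1]) != six.indexbytes(response, -1):
        raise InvalidApiResponse("checksum mismatch")
# ---------------------------------------------------------------------------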
\" \\\n \"Expected '%s' (0x%s), received '%s' (0x%s).\" % (exp_int,\n \"{:02X}\".format(exp_int),\n resp_int,\n \"{:02X}\".format(resp_int))\n\n\ndef send_cmd(ip_address, port, command=None, cmd=None,\n max_tries=default_max_tries,\n retry_wait=default_retry_wait,\n socket_timeout=default_socket_timeout):\n \"\"\"Send a command to the device API and return the response.\"\"\"\n\n cmd_code = cmd if cmd is not None else commands[command]\n size = len(cmd_code) + 2\n body = b''.join([cmd_code, struct.pack('B', size)])\n checksum = calc_checksum(body)\n cmd_packet = b''.join([header, body, struct.pack('B', checksum)])\n print(\"%s (%s):\" % (command, bytes_to_hex(cmd_code)))\n print(\"%12s: %s\" % ('sending', bytes_to_hex(cmd_packet)))\n for attempt in range(max_tries):\n response = None\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(socket_timeout)\n try:\n s.connect((ip_address, port))\n s.sendall(cmd_packet)\n response = s.recv(1024)\n except socket.error:\n raise\n finally:\n s.close()\n except socket.timeout as e:\n print(\"Failed to obtain response to attempt %d to \"\n \"send command '%s': %s\" % (attempt + 1, cmd_code, e))\n except Exception as e:\n print(\"Failed attempt %d to send command '%s': %s\" % (attempt + 1, cmd_code, e))\n else:\n if six.indexbytes(response, 2) == six.byte2int(cmd_code):\n csum = 0\n for b in six.iterbytes(response[2:-1]):\n csum += b\n checksum = csum % 256\n if checksum != six.indexbytes(response, -1):\n _msg = \"Invalid checksum in API response. \" \\\n \"Expected '%s' (0x%s), received '%s' (0x%s).\" % (checksum,\n \"{:02X}\".format(checksum),\n six.indexbytes(response, -1),\n \"{:02X}\".format(six.indexbytes(response, -1)))\n print(_msg)\n if attempt < max_tries - 1:\n time.sleep(retry_wait)\n continue\n else:\n break\n else:\n _msg = \"Invalid command code in API response. 
\" \\\n \"Expected '%s' (0x%s), received '%s' (0x%s).\" % (six.byte2int(cmd_code),\n \"{:02X}\".format(six.byte2int(cmd_code)),\n six.indexbytes(response, 2),\n \"{:02X}\".format(six.indexbytes(response, 2)))\n print(_msg)\n if attempt < max_tries - 1:\n time.sleep(retry_wait)\n continue\n print(\"%12s: %s\" % ('received', bytes_to_hex(response)))\n return response\n\n\ndef gather_api_data(ip_address, port, cmd=None,\n max_tries=default_max_tries,\n retry_wait=default_retry_wait,\n socket_timeout=default_socket_timeout):\n \"\"\"Collect and display API response data.\"\"\"\n\n if cmd is not None:\n cmd_code = hex_to_bytes(cmd)\n response = send_cmd(cmd=cmd_code, ip_address=ip_address, port=port,\n max_tries=max_tries, retry_wait=retry_wait, socket_timeout=socket_timeout)\n else:\n # first discover any available gateway devices\n device_list = discover()\n for device in device_list:\n print(\" Discovered device: %s\" % (device,))\n # now try to identify the model of the specified device\n response = send_cmd(command='CMD_READ_FIRMWARE_VERSION', ip_address=ip_address, port=port,\n max_tries=max_tries, retry_wait=retry_wait, socket_timeout=socket_timeout)\n model = None\n _firmware_t = struct.unpack(\"B\" * len(response), response)\n _firmware_str = \"\".join([chr(x) for x in _firmware_t[5:5 + _firmware_t[4]]])\n if _firmware_str is not None:\n for m in known_models:\n if m in _firmware_str.upper():\n model = m\n break\n if model is not None:\n print(\"Device appears to be a '%s'\" % model)\n else:\n print(\"Device model is unknown\")\n # now issue all api_commands in the manifest\n for command in manifest:\n response = send_cmd(command=command, ip_address=ip_address, port=port, max_tries=max_tries,\n retry_wait=retry_wait, socket_timeout=socket_timeout)\n\n\n# To run this code on setup.py installs use:\n#\n# $ PYTHONPATH=/home/weewx/bin python -m user.gw1000 --run --ip-address=IP_ADDRESS\n#\n# or for package installs use:\n#\n# $ PYTHONPATH=/usr/share/weewx python -m user.gw1000 --run --ip-address=IP_ADDRESS\n#\n# Depending on your system you may need change 'python' in the above api_commands\n# to 'python2' or 'python3'.\n\ndef main():\n import optparse\n\n usage = \"\"\"Usage: python -m user.api_data --help\n python -m user.api_data --version\n python -m user.api_data --run\n --ip-address=IP_ADDRESS\n [--port=PORT]\"\"\"\n\n parser = optparse.OptionParser(usage=usage)\n parser.add_option('--version', dest='version', action='store_true',\n help='display version number')\n parser.add_option('--run', dest='run', action='store_true',\n help='gather data from the device API')\n parser.add_option('--ip-address', dest='ip_address',\n help='device IP address to use')\n parser.add_option('--port', dest='port', type=int,\n help='device port to use')\n parser.add_option('--cmd', dest='cmd',\n help=\"command code to issue, must be in format xy \"\n \"where x and y are hexadecimal digits\")\n (opts, args) = parser.parse_args()\n\n # display version number\n if opts.version:\n print(\"version: %s\" % VERSION)\n elif opts.run:\n if opts.ip_address is not None:\n port = opts.port if opts.port is not None else default_port\n if opts.cmd is not None:\n gather_api_data(opts.ip_address, port, cmd=str(opts.cmd))\n else:\n gather_api_data(opts.ip_address, port)\n else:\n print()\n print(\"You must use the --ip-address option to specify an IP address to use.\")\n print(\"Exiting.\")\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"gjr80/weewx-gw1000","sub_path":"bin/user/api_data.py","file_name":"api_data.py","file_ext":"py","file_size_in_byte":17662,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"81"} +{"seq_id":"35141930926","text":"from PyQt6.QtCore import pyqtSlot, Qt\nfrom PyQt6.QtWidgets import QWidget, QSlider, QTextBrowser, QHBoxLayout, QTextEdit, QPushButton, QApplication, \\\n QGridLayout, QLabel\n\n\nclass CentralWidget(QWidget):\n def __init__(self, parent=None):\n super(CentralWidget, self).__init__(parent)\n\n self.text_edit = QTextBrowser(self)\n self.text_edit.setText(\"Started app\")\n\n self.slider = QSlider(self)\n self.slider.setRange(50, 75)\n self.slider.setValue(60)\n self.slider.valueChanged.connect(self.append_text)\n self.slider.setOrientation(Qt.Orientation.Horizontal)\n\n\n self.pushbutton = QPushButton(self)\n self.pushbutton.show()\n self.pushbutton.setText(\"Schließen\")\n self.pushbutton.clicked.connect(QApplication.instance().quit)\n\n self.label1 = QLabel(self)\n self.label1.setText(\"Slider\")\n\n self.label2 = QLabel(self)\n self.label2.setText(\"Textfeld\")\n\n # A1: Umstellung auf QGridLayout mit vertikaler Orientierung\n #layout = QHBoxLayout(self)\n #layout.addWidget(self.pushbutton)\n #layout.addWidget(self.slider)\n #layout.addWidget(self.text_edit)\n\n layout = QGridLayout(self)\n layout.addWidget(self.pushbutton, 3, 2)\n layout.addWidget(self.slider, 1, 2)\n layout.addWidget(self.text_edit, 2, 2)\n layout.addWidget(self.label1, 1, 1)\n layout.addWidget(self.label2, 2, 1, Qt.AlignmentFlag.AlignTop)\n\n\n self.setLayout(layout)\n\n @pyqtSlot(int)\n def append_text(self, value_as_int):\n text = \"Value Changed: \" + str(value_as_int)\n self.text_edit.append(text)\n","repo_name":"GregGun/Einfuehrung","sub_path":"CentralWidget.py","file_name":"CentralWidget.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70023646666","text":"# Creating Numpy Arrays\n\nimport numpy as np\n\n# converting from a list\nmy_list1 = [1,2,3,4]\nmy_array1 = np.array(my_list1)\nmy_array1\n\n# Make another list\nmy_list2 = [11,22,33,44]\n\n# Make a list of lists\nmy_lists = [my_list1, my_list2]\n\n# Make multi-dimensional array\nmy_array2 = np.array(my_lists)\n\n# Show array\nmy_array2\n\n# Get the size of the array\nmy_array2.shape # 2 rows, 4 columns\n\n# Find the data type of the array\nmy_array2.dtype\n\n# Making special case arrays\n\nnp.zeros(5) # zero array, 1x5\n\nnp.ones((5,5)) # ones array, 5x5\n\nnp.empty(5) # empty array, 1x5\n\nnp.empty((3,4)) # empty array, 3x4\n\nnp.eye(5) # Identity matrix array, 5x5\n\n# Using a range\nnp.arange(5)\n\n","repo_name":"colson1111/Udemy","sub_path":"Python_Data_Analytics/Lecture_07_Creating_Arrays.py","file_name":"Lecture_07_Creating_Arrays.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70923302664","text":"import torch\nfrom torch_geometric.nn import MessagePassing\nimport torch.nn.functional as F\nfrom torch_geometric.utils import degree\n\nfrom layers.encoders import EdgeEncoder\n\n\nclass GCNConv(MessagePassing):\n def __init__(self, dataset_name, edge_dim, emb_dim):\n super(GCNConv, self).__init__(aggr=\"add\")\n self.edge_dim = edge_dim\n self.linear = torch.nn.Linear(emb_dim, emb_dim)\n self.root_emb = torch.nn.Embedding(1, emb_dim)\n self.edge_encoder = 
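# ---------------------------------------------------------------------------
# [Small addition to the array-creation examples above -- assumes NumPy only.]
# arange pairs naturally with reshape for quick multi-dimensional arrays, and
# linspace is the usual choice when the endpoint must be included:
import numpy as np

np.arange(12).reshape(3, 4)   # 3x4 array holding 0..11
np.linspace(0, 1, 5)          # array([0.  , 0.25, 0.5 , 0.75, 1.  ])
# ---------------------------------------------------------------------------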
EdgeEncoder(dataset_name, edge_dim, emb_dim)\n\n def forward(self, x, edge_index, edge_attr):\n x = self.linear(x)\n row, col = edge_index\n\n deg = degree(row, x.size(0), dtype=x.dtype) + 1\n deg_inv_sqrt = deg.pow(-0.5) # D\n deg_inv_sqrt[deg_inv_sqrt == float(\"inf\")] = 0\n\n norm = deg_inv_sqrt[row] * deg_inv_sqrt[col] # norm for graph\n if edge_attr != None and len(edge_attr.shape) != 1:\n # for have edge_attr situation\n edge_embedding = self.edge_encoder(edge_attr)\n return self.propagate(\n edge_index, x=x, edge_attr=edge_embedding, norm=norm\n ) + F.relu(x + self.root_emb.weight) * 1.0 / deg.view(-1, 1)\n else:\n # for no edge_attr situation\n edge_embedding = 0\n return self.propagate(\n edge_index,\n x=x,\n norm=norm,\n edge_attr=edge_embedding,\n use_edge_attr=False,\n ) + F.relu(x + self.root_emb.weight) * 1.0 / deg.view(-1, 1)\n\n def message(self, x_j, edge_attr, norm):\n if edge_attr != None:\n # for have edge_attr situation\n return norm.view(-1, 1) * F.relu(x_j + edge_attr)\n else:\n # for no edge_attr situation\n return norm.view(-1, 1) * x_j\n\n def update(self, aggr_out):\n return aggr_out\n\n\nclass GCNConvwithAdj(torch.nn.Module):\n # the edge_attr is not encoded in SingleGCNwithAdj\n def __init__(self, in_dim, emb_dim, drop_ratio, device, bias=True):\n super(GCNConvwithAdj, self).__init__()\n self.drop_ratio = drop_ratio\n self.emb_dim = emb_dim\n self.in_dim = in_dim\n self.weight = torch.nn.Parameter(torch.FloatTensor(self.in_dim , self.emb_dim).to(device))\n if bias:\n self.bias = torch.nn.Parameter(torch.FloatTensor(self.emb_dim).to(device))\n else:\n self.bias = None\n\n def forward(self, h, adj):\n h = F.dropout(h, self.drop_ratio, training = self.training)\n h = torch.matmul(adj, h)\n h = torch.matmul(h, self.weight)\n if self.bias is not None:\n h = h + self.bias\n return h","repo_name":"codingClaire/GraphPoolingGarden","sub_path":"graphpoolinggarden/layers/gcn_layer.py","file_name":"gcn_layer.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"9735270804","text":"import logging\n\nimport sys\n\n\nclass LogFilter(logging.Filter):\n '''Filters (lets through) all messages with level < LEVEL'''\n\n def __init__(self, level):\n self.level = level\n\n def filter(self, record):\n return record.levelno < self.level # \"<\" instead of \"<=\": since logger.setLevel is inclusive, this should be exclusive\n\n\nMIN_LEVEL = logging.DEBUG\nstdout_hdlr = logging.StreamHandler(sys.stdout)\nstderr_hdlr = logging.StreamHandler(sys.stderr)\nlog_filter = LogFilter(logging.WARNING)\nstdout_hdlr.addFilter(log_filter)\nstdout_hdlr.setLevel(MIN_LEVEL)\nstderr_hdlr.setLevel(max(MIN_LEVEL, logging.WARNING))\n\nformatter = logging.Formatter('%(message)s')\nstdout_hdlr.setFormatter(formatter)\nstderr_hdlr.setFormatter(formatter)\n\nlog = logging.getLogger()\nlog.addHandler(stdout_hdlr)\nlog.addHandler(stderr_hdlr)\nlog.setLevel(logging.DEBUG)\n","repo_name":"Negashev/hade","sub_path":"server/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"13252270956","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\nimport logging\n\nimport alias.db\n\n#-----------------------------------------------------------------------------\n# Function Definitions\n#-----------------------------------------------------------------------------\ndef 
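# ---------------------------------------------------------------------------
# [Worked example -- not part of the original layer.] The per-edge `norm` in
# GCNConv.forward above is the symmetric normalisation D^{-1/2} (A+I) D^{-1/2}
# written edge-by-edge (the +1 on the degree plays the role of a self-loop).
# The dense analogue with explicit self-loops, which is the kind of `adj`
# GCNConvwithAdj expects:
import torch

A = torch.tensor([[0., 1.], [1., 0.]])
A_hat = A + torch.eye(2)                      # add self-loops
deg_inv_sqrt = A_hat.sum(dim=1).pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0  # guard zero-degree rows
adj_norm = deg_inv_sqrt.view(-1, 1) * A_hat * deg_inv_sqrt.view(1, -1)
# ---------------------------------------------------------------------------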
__get_names(name):\n '''\n Get the formatted name if available, if not build the name from the given\n and family names.\n '''\n logger.debug('Processing name.')\n name_list = []\n\n if (name == []) or (name is None):\n return name_list\n\n if name.get('formatted') is not None:\n name_list.append(unicode(name['formatted']))\n else:\n given = name.get('givenName', u'')\n family = name.get('familyName', u'')\n name_list.append(u'{0} {1}'.format(given, family).strip())\n\n return name_list\n\n\ndef __get_locations(location):\n '''\n Get the location data.\n '''\n logger.debug('Processing locations.')\n locations = []\n\n if location is not None:\n locations.append(location)\n\n return locations\n\n\ndef __get_emails(emails, ims):\n '''\n Pull any email addresses from the emails list and from the IMs list.\n '''\n logger.debug('Processing email addresses.')\n email_list = []\n\n if emails is not None:\n e = [e['value'] for e in emails]\n email_list.extend(e)\n\n if ims is not None:\n for i in ims:\n if '@' in i['value']:\n email_list.append(i['value'])\n\n return email_list\n\n\ndef __get_accounts(accounts):\n '''\n Pull any additional usernames from the accounts list.\n '''\n logger.debug('Processing accounts.')\n acct_list = []\n\n if accounts is not None:\n for a in accounts:\n acct_list.append('{0} ({1})'.format(a['username'], a['shortname']))\n\n return acct_list\n\n\ndef __get_urls_from_accounts(accounts):\n '''\n Pull any additional URLs from the accounts list.\n '''\n logger.debug('Processing URLs.')\n url_list = []\n\n if accounts is not None:\n for a in accounts:\n url_list.append(a.get('url', '').replace('\\/', '/'))\n\n return url_list\n\n\ndef __get_nyms(ims):\n '''\n Pull any additional usernames from the IMs list.\n '''\n logger.debug('Processing IM list.')\n nym_list = []\n\n if ims is not None:\n for i in ims:\n nym_list.append('{0} ({1})'.format(i['value'], i['type']))\n\n return nym_list\n\n\ndef __get_images(data):\n '''\n Pull any image links from the gravatar data.\n '''\n logger.debug('Processing image list.')\n image_list = []\n\n if data.get('thumbnailUrl') is not None:\n image_list.append(data['thumbnailUrl'])\n\n if data.get('profileBackground') is not None:\n if data['profileBackground'].get('url') is not None:\n image_list.append(data['profileBackground']['url'])\n\n if data.get('photos') is not None:\n for u in data.get('photos'):\n image_list.append(u['value'])\n\n return image_list\n\n\ndef __get_urls(data):\n '''\n Pull any urls from the gravatar data.\n '''\n logger.debug('Processing URLs.')\n url_list = []\n\n if data.get('urls') is not None:\n for u in data.get('urls'):\n url_list.append(u['value'])\n\n return url_list\n\n\ndef __get_descriptions(description):\n '''\n Get the description data.\n '''\n logger.debug('Processing description.')\n descriptions = []\n\n if description is not None:\n descriptions.append(description)\n\n return descriptions\n\n\ndef __process_results(result):\n '''\n Get each of the data items we are looking for from the result and write\n them to the databases.\n '''\n logger.debug('Processing result.')\n\n username = result[0]\n\n if result[1] is not None:\n data = result[1]['entry'][0]\n emails = __get_emails(data.get('emails'), data.get('ims'))\n for email in sorted(set(emails)):\n alias.db.add_target_email(username, email)\n\n nyms = __get_nyms(data.get('ims'))\n for nym in sorted(set(nyms)):\n alias.db.add_target_nym(username, nym)\n\n accts = __get_accounts(data.get('accounts'))\n for acct in sorted(set(accts)):\n 
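# ---------------------------------------------------------------------------
# [Usage illustration with made-up profile data -- not from the original
# file; shown as comments because `logger` is only bound later in the module.]
# __get_emails above merges explicit email entries with IM handles that look
# like addresses (anything containing '@'):
#
# __get_emails(emails=[{'value': 'user@example.com'}],
#              ims=[{'value': 'user@chat.example', 'type': 'xmpp'},
#                   {'value': 'user123', 'type': 'aim'}])
# -> ['user@example.com', 'user@chat.example']
# ---------------------------------------------------------------------------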
alias.db.add_target_nym(username, acct)\n \n urls = __get_urls(data)\n for url in sorted(set(urls)):\n alias.db.add_target_url(username, url)\n\n urls = __get_urls_from_accounts(data.get('accounts'))\n for url in sorted(set(urls)):\n alias.db.add_target_url(username, url)\n\n locations = __get_locations(data.get('currentLocation'))\n for loc in sorted(set(locations)):\n alias.db.add_target_location(username, loc)\n\n names = __get_names(data.get('name'))\n for name in sorted(set(names)):\n alias.db.add_target_name(username, name)\n\n descriptions = __get_descriptions(data.get('aboutMe'))\n for desc in descriptions:\n alias.db.add_target_description(username, desc)\n\n # If we have valid data add this target to the gravatar source list\n alias.db.add_target_to_source_list(username, 'gravatar')\n\n alias.db.mark_source_complete(username, 'gravatar')\n\n\ndef __lookup_user(user):\n '''\n Lookup the user. Sometimes Gravatar will have an alternate username as\n the primary username on the account. We track that username using the\n user['gvuser'] value.\n '''\n\n try:\n url = 'http://en.gravatar.com/{0}.json'.format(user['gvuser'])\n resp = requests.get(url, allow_redirects=False)\n if resp.status_code == 404:\n __process_results((user['user'], None))\n return None\n\n elif resp.status_code == 302:\n location = resp.headers['location']\n if location == '/profiles/no-such-user':\n __process_results((user['user'], None))\n return None\n\n if not location.startswith('http://en.gravatar.com'):\n user['gvuser'] = location.lstrip('/').lower()\n return user \n \n else:\n __process_results((user['user'], resp.json()))\n return None\n\n except Exception as e:\n logger.debug(str(e))\n return None\n\n\n#-----------------------------------------------------------------------------\n# Main Program\n#-----------------------------------------------------------------------------\nlogger = logging.getLogger('Gravatar')\n\ndef lookup():\n logger.info('Starting Gravatar lookup.')\n \n # Load targets from the database.\n count = 0\n logger.info('Getting unprocessed Gravatar usernames from database.')\n for target in alias.db.get_unchecked_targets('gravatar', 'user'):\n count += 1\n\n # Skip targets with a . in them.\n if target.find('.') != -1:\n continue\n\n resp = __lookup_user({'user': target, 'gvuser': target})\n if resp is not None:\n # \n # When the response is not None, the user has an alternate name on\n # Gravatar. 
Lookup that name instead.\n __lookup_user(resp)\n\n if count % 1000 == 0:\n logger.info('Processed {0} Gravatar users.'.format(count))\n\n logger.info('Processed {0} Gravatar users.'.format(count))\n logger.info('Finished Gravatar lookup.')\n\n return None\n","repo_name":"averagesecurityguy/alias","sub_path":"alias/lookup/gravatar.py","file_name":"gravatar.py","file_ext":"py","file_size_in_byte":7286,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"26353711598","text":"from models.email import Email\n\nfrom .base_producer import BaseProducer\n\n\nTOPIC = 'ECOMMERCE_SEND_EMAIL'\n\n\nclass EmailProducer(BaseProducer):\n def __init__(self, topic=TOPIC):\n super(EmailProducer, self).__init__(topic, 0)\n\n async def send(self, email: Email):\n await self._send(email.to_dict())","repo_name":"SergioVenicio/kafka-alura","sub_path":"app/producers/email_producer.py","file_name":"email_producer.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72042650184","text":"import torch\nfrom gpt2.modeling import Past\nfrom gpt2.translation import TranslationSpec, TranslationConfig\nfrom typing import List, Optional, Tuple\n\nclass Translator(object):\n def __init__(self, spec: TranslationSpec, config: TranslationConfig):\n self.spec = spec\n self.config = config\n \n def initialize(self, from_model: Optional[str] = None):\n self.spec.initialize()\n self.model = self.spec.construct_model().eval()\n \n if from_model:\n ckpt = torch.load(from_model, map_location='cpu')\n self.model.load_state_dict(ckpt['model'])\n \n if self.config.use_gpu:\n self.model.cuda().half()\n \n def translate(self, source: str) -> str:\n words = self.spec.encode_context(source.lower())\n \n current, past = words, None\n while len(words) < self.config.seq_len:\n # Predict the next word token from the given context.\n probs, past = self._predict_probs(current, past)\n next_word = self._sample_from_top_p(probs)\n \n # Change the context to the predicted word.\n words.append(next_word)\n current = [next_word]\n return self.spec.decode_tokens(words)\n \n def translate_with_attn(self, source: str) -> Tuple[str, List[int], torch.tensor]:\n words = self.spec.encode_context(source.lower())\n current, past = words, None\n attn = torch.zeros((self.spec.heads, self.config.seq_len, self.config.seq_len))\n input_attn = None\n idx = 0\n while len(words) < self.config.seq_len:\n # Predict the next word token from the given context.\n probs, past, _attn = self._predict_probs_attn(current, past)\n if idx == 0:\n _attn[_attn < 0] = 0\n input_attn = _attn\n else:\n for j in range(self.spec.heads):\n shape = _attn[j].shape\n _attn[_attn < 0] = 0\n attn[j, len(words):len(words) + shape[0], :shape[1]] = _attn[j]\n idx += 1\n next_word = self._sample_from_top_p(probs)\n # Change the context to the predicted word.\n words.append(next_word)\n current = [next_word]\n if next_word == self.spec.vocab.eos_idx:\n break\n return self.spec.decode_tokens(words), words, attn[:,:len(words),:len(words)], input_attn\n \n @torch.no_grad()\n def _predict_probs(self,\n words: List[int],\n past: Optional[List[Past]] = None\n ) -> Tuple[torch.Tensor, List[Past]]:\n x = torch.tensor(words, dtype=torch.long)\n x = self.spec.decorate_sequence(\n x, offset=past[0][0].size(-2) if past is not None else 0)\n \n if self.config.use_gpu:\n logits, past = self.model(x.cuda(), past)\n logits = logits.cpu().float()\n else:\n 
logits, past = self.model(x, past)\n \n return logits[-1, :].softmax(-1), past\n \n @torch.no_grad()\n def _predict_probs_attn(self,\n words: List[int],\n past: Optional[List[Past]] = None\n ) -> Tuple[torch.Tensor, List[Past]]:\n \n x = torch.tensor(words, dtype=torch.long)\n x = self.spec.decorate_sequence(\n x, offset=past[0][0].size(-2) if past is not None else 0)\n \n if self.config.use_gpu:\n logits, past, attn = self.model.forward_attn(x.cuda(), past)\n logits, attn = logits.cpu().float(), attn.cpu().float()\n else:\n logits, past, attn = self.model.forward_attn(x, past)\n \n return logits[-1, :].softmax(-1), past, attn\n \n @torch.no_grad()\n def _predict_probs(self,\n words: List[int],\n past: Optional[List[Past]] = None\n ) -> Tuple[torch.Tensor, List[Past]]:\n \n x = torch.tensor(words, dtype=torch.long)\n x = self.spec.decorate_sequence(\n x, offset=past[0][0].size(-2) if past is not None else 0)\n \n if self.config.use_gpu:\n logits, past, attn = self.model.forward_attn(x.cuda(), past)\n logits, attn = logits.cpu().float(), attn.cpu().float()\n else:\n logits, past, attn = self.model.forward_attn(x, past)\n \n return logits[-1, :].softmax(-1), past, attn\n \n def _sample_from_top_p(self, probs: torch.Tensor) -> int:\n return probs.argmax().item()","repo_name":"azadyasar/GPT2","sub_path":"src/gpt2/translation/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13577265996","text":"import pymongo\n\n# Replace the uri string with your MongoDB deployment's connection string.\nconn_str = \"mongodb+srv://Aditya:applevibe@cluster0.ovdha.mongodb.net/Applevibe?retryWrites=true&w=majority\"\n\n\n# set a 5-second connection timeout\nclient = pymongo.MongoClient(conn_str, serverSelectionTimeoutMS=5000)\n\ndb = client[\"Applevibe\"]\ncollection = db[\"users\"]\n\n\"\"\"\n#Push Data\npost ={\"_id\":0, \"name\": \"Aditya\", \"score\": 5}\n\ncollection.insert_one(post)\n\"\"\"\n\n\"\"\"\n#Get data\nx = collection.find_one()\n\nprint(x)\n\"\"\"\n\nmyquery = { \"name\": \"Not Aditya\" }\n\nmyquery = { \"name\": \"Aditya\" }\nnewvalues = { \"$set\": { \"name\": \"Not Aditya\" } }\n\ncollection.update_one(myquery, newvalues)\n\n\nfor x in collection.find():\n print(x)\n","repo_name":"aditya-s3n/Apple-Vibe","sub_path":"Backend/MongoDB test.py","file_name":"MongoDB test.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7527300063","text":"import json\nimport time\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport psycopg2\nimport psycopg2.extras\n\nimport urllib.parse as urlparse\nimport os\nfrom lotify.client import Client\n\nlotify = Client()\nnotify = os.getenv('LINE_NOTIFY_TOKEN')\n\nURL = urlparse.urlparse(os.getenv('DATABASE_URI'))\nDB_NAME = URL.path[1:]\nUSER = URL.username\nPASSWORD = URL.password\nHOST = URL.hostname\nPORT = URL.port\n\n\nclass Database:\n conns = []\n\n def __enter__(self):\n return self\n\n def connect(self):\n conn = psycopg2.connect(\n dbname=DB_NAME,\n user=USER,\n password=PASSWORD,\n host=HOST,\n port=PORT\n )\n self.conns.append(conn)\n\n return conn\n\n def __exit__(self, type, value, traceback):\n for conn in self.conns:\n conn.close()\n\n self.conns.clear()\n\n\ndef db_table_check():\n try:\n with Database() as db, db.connect() as conn, conn.cursor(\n cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n 
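# ---------------------------------------------------------------------------
# [Hedged notes on the Translator class above.] Two things worth flagging:
# (1) _sample_from_top_p is greedy decoding (argmax) despite its name, and
# (2) _predict_probs is defined twice, so the later three-value version
# shadows the first and `probs, past = self._predict_probs(...)` in
# translate() would fail to unpack. If the name is meant to match the
# behaviour, a standalone nucleus (top-p) sampler looks like this:
import torch

def sample_from_top_p(probs, top_p=0.9):
    sorted_probs, sorted_idx = probs.sort(descending=True)
    # keep tokens whose *preceding* cumulative mass is below top_p,
    # which always keeps at least the most likely token
    keep = sorted_probs.cumsum(-1) - sorted_probs < top_p
    kept = sorted_probs * keep
    choice = torch.multinomial(kept / kept.sum(), 1)
    return sorted_idx[choice].item()
# ---------------------------------------------------------------------------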
cur.execute(f'''\n CREATE TABLE public.stream\n (\n id serial NOT NULL PRIMARY KEY,\n link character varying(255) COLLATE pg_catalog.\"default\",\n image character varying(255) COLLATE pg_catalog.\"default\",\n title character varying(100) COLLATE pg_catalog.\"default\",\n is_live boolean DEFAULT false,\n CONSTRAINT stream_unique UNIQUE (link, image, title)\n INCLUDE(link, image, title)\n )\n TABLESPACE pg_default;\n \n ALTER TABLE public.game\n OWNER to {USER};\n ALTER TABLE public.stream\n OWNER to {USER};\n ''')\n conn.commit()\n except psycopg2.errors.DuplicateTable:\n print('Tables have been create.')\n pass\n except Exception as e:\n raise Exception(e)\n\n\ndef stream_parser():\n yt = requests.get(\n 'https://www.youtube.com/c/PLEAGUEofficial/videos?view=2&sort=dd&live_view=502&shelf_id=2')\n message = BeautifulSoup(yt.content, 'html.parser')\n video_scripts = message.find_all('script')\n bs_to_string = str(video_scripts[32])\n variable_string = bs_to_string.split('var ytInitialData = ')[1].split(';')[0]\n variable_dict = json.loads(variable_string)\n clean_list = \\\n variable_dict[\"contents\"][\"twoColumnBrowseResultsRenderer\"][\"tabs\"][1][\"tabRenderer\"][\n \"content\"][\n \"sectionListRenderer\"][\"contents\"][0][\"itemSectionRenderer\"][\"contents\"][0][\n \"gridRenderer\"][\"items\"]\n streams = []\n for data in clean_list:\n if data.get(\"gridVideoRenderer\") is None:\n break\n image = data[\"gridVideoRenderer\"][\"thumbnail\"]['thumbnails'][3][\"url\"].split('?sqp')[0]\n title = data[\"gridVideoRenderer\"][\"title\"][\"runs\"][0][\"text\"]\n link_path = \"https://www.youtube.com/\" + \\\n data[\"gridVideoRenderer\"][\"navigationEndpoint\"][\"commandMetadata\"][\n \"webCommandMetadata\"][\"url\"]\n streams.append({'title': title, 'link': link_path, 'image': image})\n\n return streams\n\n\ndef insert_or_update_to_stream(streams):\n with Database() as db, db.connect() as conn, conn.cursor(\n cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n print(\"Refresh stream table.\")\n for stream in streams:\n cur.execute(f'''\n INSERT INTO stream (title, image, link)\n VALUES (\n '{stream.get('title')}', \n '{stream.get('image')}',\n '{stream.get('link')}'\n ) ON CONFLICT ON CONSTRAINT stream_unique\n DO UPDATE SET\n title = '{stream.get('title')}',\n image = '{stream.get('image')}',\n link = '{stream.get('link')}'\n ''')\n conn.commit()\n\n\ndef all_game(season):\n schedule = requests.get(\n f'https://pleagueofficial.com/schedule-{season}', headers={\n 'User-Agent': f'Google browsers {season}',\n })\n soup = BeautifulSoup(schedule.content, 'html.parser')\n date, week, time, teams, images, scores, places, people = [], [], [], [], [], [], [], []\n try:\n for dt in soup.find_all(class_='fs16 mt-2 mb-1'):\n date.append(dt.get_text())\n for wk in soup.find_all(class_='fs12 mb-2'):\n week.append(wk.get_text())\n for t in soup.select(\n '.col-lg-1.col-12.text-center.align-self-center.match_row_datetime > h6[class~=fs12]'):\n time.append(t.get_text())\n except Exception as e:\n lotify.send_message(access_token=notify, message=f'比賽資訊網站格式錯誤 \\n{e}')\n\n event_date = [] # Arrange date to one\n for index in range(len(date)):\n event_date.append(f'{date[index]}{week[index]} {time[index]}')\n\n for team in soup.find_all(class_='PC_only fs14'):\n teams.append(team.get_text())\n for img in soup.find_all('img', {'class': 'w105'}): # 2 to be a play\n if 'src' in img.attrs:\n team_not_sure = img['src'].startswith('//pleagueofficial.com/upload/')\n if 
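# ---------------------------------------------------------------------------
# [Hedged note on the upsert above.] Interpolating scraped titles straight
# into the SQL f-string breaks on a single quote and is an injection risk;
# psycopg2 can bind parameters server-side instead. A sketch of the same
# upsert with placeholders (EXCLUDED refers to the row that failed to insert):
cur.execute(
    """
    INSERT INTO stream (title, image, link)
    VALUES (%(title)s, %(image)s, %(link)s)
    ON CONFLICT ON CONSTRAINT stream_unique
    DO UPDATE SET title = EXCLUDED.title,
                  image = EXCLUDED.image,
                  link  = EXCLUDED.link
    """,
    {"title": stream.get("title"),
     "image": stream.get("image"),
     "link": stream.get("link")})
# ---------------------------------------------------------------------------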
(img['src'].startswith('https://pleagueofficial.com/') or team_not_sure)\\\n and img['src'].endswith('.png'):\n if team_not_sure:\n images.append(f\"https:{img['src']}\")\n else:\n images.append(img['src'])\n else:\n images.append('https://pleagueofficial.com/upload/p_team/logo_1_1605758005.png')\n for score in soup.find_all('h6', {'class': 'PC_only fs22'}):\n scores.append(score.get_text())\n for place in soup.find_all('h5', {'class': 'fs12 mb-0'}):\n places.append(place.get_text())\n for person in soup.find_all('div', {'class': 'mt-3 mb-md-0 mb-3 fs12 text-center PC_only'}):\n people.append(person.get_text())\n return event_date, teams, scores, places, people, images\n\n\ndef arrange_lists_to_one(\n event=None, teams=None,\n scores=None, places=None,\n people=None, images=None,\n season=None) -> list:\n games = []\n length = len(event)\n index, index2 = 0, 0\n while index < length:\n games.append({\n 'event_date': event[index],\n 'customer': teams[index2],\n 'main': teams[index2 + 1],\n 'customer_image': images[index2],\n 'main_image': images[index2 + 1],\n 'people': people[index],\n 'place': places[index],\n 'score': f'{scores[index2]}:{scores[index2 + 1]}',\n 'season': season\n })\n index += 1\n index2 += 2\n\n return games\n\n\ndef insert_or_update_to_game(games: list):\n with Database() as db, db.connect() as conn, conn.cursor(\n cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n for game in games:\n cur.execute(f'''\n INSERT INTO game (customer, customer_image, main, main_image, score, people, place, event_date, season)\n VALUES (\n '{game['customer']}', \n '{game['customer_image']}', \n '{game['main']}', \n '{game['main_image']}', \n '{game['score']}', \n '{game['people']}', \n '{game['place']}',\n '{game['event_date']}',\n '{game['season']}'\n ) ON CONFLICT ON CONSTRAINT game_unique\n DO UPDATE SET\n score = '{game['score']}',\n place = '{game['place']}',\n people = '{game['people']}',\n event_date = '{game['event_date']}',\n season = '{game['season']}'\n ''')\n conn.commit()\n\n\ndef main():\n print('Check tables status...')\n try:\n db_table_check()\n except Exception as e:\n lotify.send_message(access_token=notify, message=f'DB 建立出錯 \\n{e}')\n time.sleep(1)\n print('Youtube stream loading...')\n try:\n streams = stream_parser()\n except Exception as e:\n lotify.send_message(\n access_token=notify,\n message=f'Youtube 爬蟲出事啦\\n{e}')\n print('Stream gotcha!')\n time.sleep(1)\n print('Sync stream data to database.')\n try:\n insert_or_update_to_stream(streams)\n except Exception:\n lotify.send_message(\n access_token=notify,\n message=f'影片檔案資訊無法進入 db\\n陣列: {str(streams)}')\n print('Sync games...')\n\n # Add different season data to SQL\n seasons = [\n 'pre-season', 'regular-season',\n 'playoffs', 'finals'\n ]\n total_game: list = []\n for season in seasons:\n event_date, teams, scores, places, people, images = all_game(season)\n\n print('Arrange data to list...')\n games = arrange_lists_to_one(event_date, teams,\n scores, places,\n people, images,\n season)\n for game in games:\n total_game.append(game)\n print('Game arrange done.')\n print('Ready to insert games.')\n try:\n insert_or_update_to_game(total_game)\n except Exception:\n lotify.send_message(\n access_token=notify,\n message=f'比賽資訊無法進入 db\\n陣列: {str(total_game)}')\n print('Insert games 
done.')\n\n\nmain()\n","repo_name":"louis70109/PLeagueBot","sub_path":"scripts/stream_and_game.py","file_name":"stream_and_game.py","file_ext":"py","file_size_in_byte":9614,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"17191055114","text":"import json\n\nwith open('image_info_test-dev2017.json','r') as j:\n all = json.load(j)\n l = all['images']\n b = all['categories']\n print(f'\\nThis file contains {len(l)} links to images from {len(b)} categories\\n')\n filenames = []\n for inf in l:\n name = inf.get('file_name')\n if name == \"000000000001.jpg\":\n print('URL: ',inf.get('coco_url'),'\\nHeight: ',inf.get(\"height\"),'\\nWidth: ',inf.get(\"width\"),'\\nID: ',inf.get(\"id\"))\n filenames.append(int(name.replace('.jpg','')))\n max_number = max(filenames)\n ind = filenames.index(max_number)\n print('Name of an image with the biggest number: ',l[ind].get('file_name'))\n \n \n \n ","repo_name":"EveBogdanova/PythonPractice","sub_path":"Practice_14/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1229993916","text":"from .hook import Hook\nfrom ..dist_utils import use_tpu\n\n\nclass DistSamplerSeedHook(Hook):\n def before_epoch(self, runner):\n if use_tpu():\n loader = runner.data_loader._loader\n else:\n loader = runner.data_loader\n\n if hasattr(loader.sampler, 'set_epoch'):\n # in case the data loader uses `SequentialSampler` in Pytorch\n loader.sampler.set_epoch(runner.epoch)\n elif hasattr(loader.batch_sampler.sampler, 'set_epoch'):\n # batch sampler in pytorch warps the sampler as its attributes.\n loader.batch_sampler.sampler.set_epoch(runner.epoch)\n","repo_name":"neuroailab/pt_framework","sub_path":"src/pt_framework/hooks/sampler_seed.py","file_name":"sampler_seed.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12285094908","text":"from git import Repo\nimport os\nimport numpy as np\nimport datetime\nimport requests\nimport re\n# from bs4 import BeautifulSoup\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom utils import url_generate, url_template, git_push\nfrom pypinyin import pinyin\n\nISOTIMEFORMAT = '%Y-%m-%d' # %H:%M:%S,f'\n\ntheTime_str = datetime.datetime.now().strftime(ISOTIMEFORMAT)\nprint(theTime_str)\n\ntheTime_date = datetime.datetime.strptime(theTime_str, ISOTIMEFORMAT)\n\ndelta = datetime.timedelta(days=100)\nlastTime_date = theTime_date - delta\nlastTime_str = lastTime_date.strftime('%Y-%m-%d')\nprint(lastTime_str)\n\ngold_no = \"11\"\n\nweb_url_pp0 = url_generate(gold_no, date_start=lastTime_str, date_end=theTime_str, page=\"1\", url=url_template)\nresponse = requests.get(web_url_pp0)\nregex = re.compile('共(.*)页 ')\n# regex = re.compile('共.*?页 ')\npages = regex.findall(response.text)[0]\n\nprint(pages)\n\ndate_buf = []\nbrand_buf = []\nprice_buf = []\n\nfor page_idx in range(int(pages)+1):\n page_str = str(page_idx)\n web_url = url_generate(gold_no, date_start=lastTime_str, date_end=theTime_str, page=page_str, url=url_template)\n\n print(web_url)\n\n tables = pd.read_html(web_url)\n table = tables[0]\n # print(table)\n\n for key in table.keys():\n# print(list(table[key]))\n if \"日期\" in list(table[key]):\n # print(list(table[key]))\n date_buf.extend(list(table[key][1:]))\n\n if \"品牌\" in list(table[key]):\n # print(list(table[key]))\n 
brand_buf.extend(list(table[key][1:]))\n\n if \"价格\" in list(table[key]):\n # print(list(table[key]))\n price_buf.extend(list(table[key][1:]))\n\nprint(date_buf)\nprint(brand_buf)\nprint(price_buf)\n\nprint(np.unique(brand_buf))\nprint(len(price_buf))\n\n\n\n\ndate_buf_sorted = sorted(date_buf)\nprint(date_buf_sorted)\n\n\nprice_date_buf = {}\nfor idx, brand in enumerate(brand_buf):\n if brand not in price_date_buf.keys():\n price_date_buf[brand] = {\"date_str\": [], \"price\": [], \"date_int\": []}\n else:\n price_date_buf[brand][\"date_str\"].append(date_buf[idx])\n price_date_buf[brand][\"price\"].append(float(price_buf[idx]))\n\n\ndef interval_estimation(date1, date2):\n\n theTime_date1 = datetime.datetime.strptime(date1, ISOTIMEFORMAT)\n theTime_date2 = datetime.datetime.strptime(date2, ISOTIMEFORMAT)\n interval = (theTime_date2 - theTime_date1).days\n\n return interval\n\n\ndate_start_str = date_buf[-1]\ndata_end_str = date_buf[0]\nbrand_buf_unique = np.unique(brand_buf)\nfor brand in brand_buf_unique:\n\n date_brand_str = price_date_buf[brand][\"date_str\"]\n date_brand_int = []\n for date_str in date_brand_str:\n date_brand_int.append(interval_estimation(date_start_str, date_str))\n\n price_date_buf[brand][\"date_int\"] = date_brand_int\n\nplt.figure(figsize=(20, 5))\n\nfor brand in brand_buf_unique:\n\n # print(brand)\n brand_pinyin = pinyin(brand)\n brand_pinyin_str = str(brand_pinyin)\n plt.plot(price_date_buf[brand][\"date_int\"], price_date_buf[brand][\"price\"], linewidth=2, label=brand_pinyin_str)\n\n\n# plt.rcParams['font.family'] = 'serif'\n# plt.rcParams['font.serif'] = 'Simsun (founder extended)'\nplt.legend(fontsize=15, bbox_to_anchor=(1.0, 1.0), loc='upper left')\nplt.xlabel(\"Date\", fontsize=15)\nplt.ylabel(\"Price\", fontsize=15)\nplt.yticks(fontsize=15)\nplt.xticks([0, interval_estimation(date_start_str, data_end_str)], [date_start_str, data_end_str], fontsize=15)\nplt.xlim([0, interval_estimation(date_start_str, data_end_str)])\nplt.subplots_adjust(left=0.04, bottom=0.15, top=0.93, right=0.8, wspace=0.05)\nplt.savefig(\"figures/price_vs_time.png\")\nplt.close()\n\n\ngit_push() # Update Github Repo\n\n\n\n\n\n#\n#\n# response = requests.get(web_url)\n# print(response.text)\n# soup = BeautifulSoup(response.text, 'html.parser')\n# table = soup.find('table', {'id': 'datalist'})\n# rows = table.findAll('tr')\n#\n# for row in rows[1:]:\n# cols = row.findAll('td')\n# date = cols[0].text.strip()\n# price = cols[1].text.strip()\n# print(f'{date}: {price}')\n\n\n\n\n","repo_name":"guanchuwang/Gold_price","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4267579582","text":"import pytest\n\nfrom invest import helpers\n\n\n@pytest.mark.parametrize('path,expected_prefix', (\n ('es/industries/foo/', 'es'),\n ('zh-hans/industries/', 'zh-hans'),\n ('de/industries/aerospace/bar/', 'de'),\n ('fr/industries/free-foods/', 'fr'),\n))\ndef test_get_language_from_prefix(path, expected_prefix):\n prefix = helpers.get_language_from_prefix(path)\n assert prefix == expected_prefix\n","repo_name":"uktrade/great-international-ui","sub_path":"invest/tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"32424705436","text":"import matplotlib.pyplot as plt\n\nfrom plotDataPoints import plotDataPoints\nfrom 
show import show\n\ndef plotProgresskMeans(X, centroids, previous, idx, K, i, color):\n \"\"\"plots the data\n points with colors assigned to each centroid. With the previous\n centroids, it also plots a line between the previous locations and\n current locations of the centroids.\n \"\"\"\n\n# Plot the examples\n plotDataPoints(X, idx)\n\n# Plot the centroids as black x's\n plt.scatter(centroids[:, 0], centroids[:, 1],\n marker='x', s=60, lw=3, edgecolor='k')\n\n# Plot the history of the centroids with lines\n for j in range(len(centroids)):\n plt.plot([centroids[j,0], previous[j,0]],\n [centroids[j,1], previous[j,1]], c=color)\n\n# Title\n plt.title('Iteration number %d' % i)\n show()\n raw_input(\"Program paused. Press Enter to continue...\")\n\n","repo_name":"mstampfer/Coursera-Stanford-ML-Python","sub_path":"ex7/plotProgresskMeans.py","file_name":"plotProgresskMeans.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":434,"dataset":"github-code","pt":"81"} +{"seq_id":"14640308351","text":"from itertools import combinations\n\n\nN = int(input())\nresult = set()\n\n# 1\nfor i in range(1, 11):\n for com in combinations(range(0, 10), i):\n com = sorted(com, reverse=True)\n result.add(int(''.join(map(str, com))))\nresult = sorted(result)\n\ntry:\n print(result[N - 1])\nexcept IndexError:\n print(-1)\n\n# 2\nnumber = []\n\n\ndef dfs():\n if number:\n result.add(int(''.join(map(str, number))))\n for i in range(10):\n if not number or number[-1] > i: # 왼쪽 수가 오른쪽 수 보다 크므로 감소 케이스\n number.append(i)\n dfs()\n number.pop()\n\n\ndfs()\nresult = sorted(result)\nprint(result[N - 1] if len(result) >= N else -1)\n\n\n\n\"\"\"\n1자리 수 0 ~ 9 \n2자리 수 10 ~ 90 : 앞자리 수만큼 케이스 생김 ex) 9x : 9가지\n3자리 수 210 ~ 987 : 맨 앞자리 수(k) 보다 작은 경우, k - 1 가지 \n\n1xx : 1 : 1\n3xx : 2 + 1 : 3 \n4xx : 3 + 2 + 1 : 6 \n5xx : 4 + 3 + 2 + 1 : 10\n\"\"\"\n# def sample(k: int):\n# if len(set(str(k))) == len(str(k)) and str(i) == ''.join(sorted(list(str(i)), reverse=True)):\n# return True\n#\n#\n# res = 0\n# while True:\n# # if count > int(N):\n# # res = -1\n# if i == 1000000:\n# print(-1)\n# exit()\n# if count == int(N):\n# break\n# if sample(i):\n# res = i\n# count += 1\n# i += 1\n# print(res)","repo_name":"hugehoo/problem-solving","sub_path":"2022/2022-12/1174.py","file_name":"1174.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3181459891","text":"from woniuboss_protocol_framework.comm.get_session import Getsession\r\nfrom woniuboss_protocol_framework.comm.operatdb import Operatdb\r\n\r\n\r\nclass Testaddsrc:\r\n def __init__(self):\r\n self.gs = Getsession()\r\n self.db = Operatdb()\r\n\r\n def test_addsrc(self,protocolname, caseid, casetitle,meth,url,data,exc):\r\n self.gs.login()\r\n res, code = self.gs.reques_meth(meth, url, data)\r\n # 构建数据 与 数据库查询出来的作对比\r\n li1 = []\r\n dic = {}\r\n dic.update({\"age\": data[\"cus.age\"], \"sex\": data[\"cus.sex\"], \"email\": data[\"cus.email\"], \"school\": data[\"cus.school\"]})\r\n li1.append(dic)\r\n mail = data[\"cus.email\"]\r\n sql = f'SELECT age,sex,email,school FROM customer WHERE email=\"{mail}\";'\r\n # 得到数据库查询结果\r\n dbli,num= Operatdb().read_db(sql)\r\n # print(li1,dbli)\r\n # print(exc,res)\r\n if code == 200 and exc == res and li1==dbli:\r\n print(\"测试成功\")\r\n self.db.insert_res(protocolname, caseid, casetitle, \"success\", \"无\")\r\n else:\r\n print(\"测试失败\")\r\n self.db.insert_res(protocolname, caseid, casetitle, 
\"failed\", \"无\")","repo_name":"Nothing-zz/protocolframework","sub_path":"test_case/test_addsrc.py","file_name":"test_addsrc.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23586193784","text":"file = open(\"Lina_ali_no_dups_reduced_renamed.fasta\").readlines()\r\nfile = [i.rstrip() for i in file]\r\nnames = []\r\nseq = []\r\nseq_temp = \"\"\r\n\r\nfor i in file:\r\n if \">\" in i:\r\n if seq_temp:\r\n seq.append(seq_temp)\r\n seq_temp = \"\"\r\n names.append(i)\r\n else:\r\n seq_temp += i\r\n\r\nseq.append(seq_temp)\r\n\r\nout = open(\"Lina_ali_no_dups_reduced_3_rep.fasta\", \"w\")\r\nfor i in range(len(seq)):\r\n if int(names[i].split(\"_\")[-1]) % 4 != 0:\r\n out.write(names[i] + \"\\n\")\r\n out.write(seq[i] + \"\\n\")","repo_name":"ilbsm/CL0057_desc","sub_path":"basic_project_files/reduce_n_of_seq.py","file_name":"reduce_n_of_seq.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14959256264","text":"from logging import getLogger\nfrom fastapi import APIRouter, Form\nfrom typing import Optional\n\nfrom src.models.achievement_report import AchievementReport\nfrom src.rcode import rcode\n\nlogger = getLogger(\"app\")\n\nrouter = APIRouter()\n\n\n@router.get(\"/achievement_report\")\ndef get_achievement_report(\n id: Optional[str] = None,\n year: Optional[int] = None,\n id_achievement_type: Optional[str] = None,\n achievement_count: Optional[int] = None,\n):\n if (\n id is None\n and year is None\n and id_achievement_type is None\n and achievement_count is None\n ):\n return rcode(\"NotFound\")\n \n error, achievement_report = AchievementReport.get(\n id, year, id_achievement_type, achievement_count\n )\n if error:\n return rcode(error)\n\n return {**rcode(1000), \"achievement_reports\": achievement_report}\n\n\n@router.get(\"/all_achievement_reports\")\ndef get_all_achievement_reports():\n error, achievement_reports = AchievementReport.get_all()\n\n if error:\n return rcode(error)\n\n return {**rcode(1000), \"achievement_reports\": achievement_reports}\n\n\n@router.post(\"/achievement_report\")\ndef post_achievement_report(\n year: int = Form(None),\n id_achievement_type: str = Form(None),\n achievement_count: int = Form(None),\n):\n error, _ = AchievementReport.insert(year, id_achievement_type, achievement_count)\n\n if error:\n return rcode(error)\n\n return rcode(1000)\n\n@router.put(\"/achievement_report\")\ndef put_achievement_report(\n id: str = Form(None),\n year: int = Form(None),\n id_achievement_type: str = Form(None),\n achievement_count: int = Form(None),\n):\n error, _ = AchievementReport.update(\n id, year, id_achievement_type, achievement_count\n )\n\n if error:\n return rcode(error)\n\n return rcode(1000)\n\n@router.delete(\"/achievement_report\")\ndef delete_achievement_report(id: str = Form(None)):\n error, _ = AchievementReport.delete(id)\n\n if error:\n return rcode(error)\n\n return rcode(1000)","repo_name":"hao3830/family-free-backend","sub_path":"app/routers/achievement_report.py","file_name":"achievement_report.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31352215713","text":"#!/usr/bin/env python3\n\n# switch on/off debug printing\nDebug=True\n\nimport 
subprocess,imp,sys,os,sys,copy\n\nHomeDir=os.path.expanduser('~')\nLinks=HomeDir+'/links'\nsys.path.append(Links+'/myPython')\n\nimport myModule\nimp.reload(myModule)\n\n\nZWNJ=myModule.hex_chr('200c'); ZWJ=myModule.hex_chr('200d')\n\n\n#Consts={'ZWNJ':ZWNJ,'ZWJ':ZWJ,'HomeDir':HomeDir}\n\n# combinations for mandatory non-space and optional spaces \nCombs=([(r'j..*',r'..*'),(r'paa',r'etm'),(r'p..*',r'ef'),(r'pvg',r'etm'),(r'p..*',r'ecs')],\n [(r'p..*',r'nbn'),(r'nbn',r'p..*'), \n #(r'jc.',r'px'),\n (r'p..*', r'e.*'), # verb-modal\n (r'xs..*',r'nbn'),(r'ncp.',r'xs..*'), # verb / noun comb\n (r'nc..',r'nc..') # this is compound nouns\n ])\n\n# this is for a single sentence\ndef main(Cntr,SentA,Bags):\n UnmarkedSent=''.join([ WC_WP[0]+' ' for WC_WP in SentA ])\n if Debug: print('\\nDoing sent ind '+str(Cntr)+': '+UnmarkedSent)\n try:\n (WC_WPs,NormP)=normalise_tags(SentA)\n except:\n if Debug:\n (WC_WPs,NormP)=normalise_tags(SentA)\n else:\n pass\n try:\n (MkdSent,NWPs)=mark_spaces(WC_WPs)\n except:\n if Debug:\n (MkdSent,NWPs)=mark_spaces(WC_WPs)\n Bags=update_bags(Bags,MkdSent,NWPs,NormP,UnmarkedSent)\n return (Bags,NormP)\n\n\n# tag (wc_wp) normalisation-----------------------------------------------\n\ndef normalise_tags(OrgWC_WPs):\n WC_WPs=copy.deepcopy(OrgWC_WPs)\n NWPs=[]\n WC_WPs=purge_bad_wps(WC_WPs)\n for WC_WP in WC_WPs:\n try:\n if Debug: print('Normalising WC: '); print(WC_WP)\n NWP=normalise_tag0(WC_WP)\n if Debug: sys.stdout.write(' ... and normalised: '); print(NWP)\n NWPs.append(NWP)\n \n except:\n sys.stdout.write('Failing normalising: '); print(WC_WP)\n NormP=False\n\n \n NormP=normalisation_ok_p(WC_WPs,NWPs)\n \n return (NWPs,NormP)\n\n\ndef normalise_tag0(OrgWCTagPair):\n # import normalise\n (WC,WPs)=copy.deepcopy(OrgWCTagPair)\n if WC==''.join([ WP[0] for WP in WPs ]):\n NewWPs=WPs\n else:\n WNums=[ len(WP[0]) for WP in WPs ]\n Ws=''.join([ WP[0] for WP in WPs ])\n Ps=[ WP[1] for WP in WPs ]\n NewWPs=normalise_unequaltag(WC,Ws,WNums,Ps)\n\n return NewWPs\n\ndef normalise_unequaltag(WC,Ws,WNums,Ps):\n NewWPs=[]\n\n CumWCChars=''; WCChar=WC[0];WsChar=Ws[0]\n while WC and WCChar==WsChar:\n # cumulates it\n CumWCChars=CumWCChars+WCChar\n\n if len(CumWCChars)==WNums[0]:\n WNums.pop(0)\n NewWPs.append((CumWCChars,Ps.pop(0)))\n CumWCChars=''\n \n WC=WC[1:]; Ws=Ws[1:]\n WCChar=WC[0];WsChar=Ws[0]\n\n\n\n if CumWCChars:\n NewWPs.append((CumWCChars+WC[0],Ps.pop(0)))\n WC=WC[1:]\n\n NewWPsRev=[]\n CumWCCharsRev=''; WCCharRev=WC[-1];WsCharRev=Ws[-1]\n while WC and WCCharRev==WsCharRev:\n CumWCCharsRev=WCCharRev+CumWCCharsRev\n if len(CumWCCharsRev)==WNums[-1]:\n WNums.pop(-1)\n NewWPsRev.append((CumWCCharsRev,Ps.pop(-1)))\n CumWCCharsRev=''\n WC=WC[:-1]; Ws=Ws[:-1]\n WCCharRev=WC[-1];WsCharRev=Ws[-1]\n\n \n\n if CumWCCharsRev:\n NewWPsRev.append((CumWCCharsRev,Ps.pop(-1)))\n NewWPsRev.reverse()\n\n if WC and Ps:\n NewWPs.append((WC,Ps.pop(0)))\n\n NewWPs.extend(NewWPsRev)\n \n return NewWPs \n\n\ndef normalise_tag(OrgWCTagPair):\n (WC,Tag)=copy.deepcopy(OrgWCTagPair)\n NTag=[]\n # as long as the surface word is the same as the analysed, leave it\n while Tag:\n WP=Tag[0]\n# if len(WP)==2:\n (Wd0,PoS)=WP\n if WC.startswith(Wd0):\n NTag.append((Wd0,PoS))\n WC=WC[len(Wd0):]\n else:\n break\n Tag.pop(0)\n\n # this is when the surface and the analysed differ\n if Tag:\n AnalysisSum=sum([ len(WP[0]) for WP in Tag ])\n if len(WC)==AnalysisSum: # this is when no contraction occurs\n NTag.extend(split_accordingly(Tag,WC))\n else:\n NTag.extend(merge_wc(WC,Tag))\n \n\n return NTag\n\n\ndef 
merge_wc(WC,OrgWPs):\n NewWPs=[]; WPs=copy.deepcopy(OrgWPs)\n# AWds=[ WP[0] for WP in WPs ]\n while WPs:\n WP=WPs.pop(0); (AWd,PoS)=WP\n CumWC=''; DiffFnd=False\n for WCChar,AWdChar in zip(WC,AWd):\n if WCChar != AWdChar:\n DiffFnd=True\n CumWC=CumWC+WCChar; WC=WC[1:]\n if DiffFnd:\n NxtWP=WPs[0]\n RedNxtWd=NxtWP[0][1:]\n if RedNxtWd=='':\n WPs.pop(0)\n else:\n WPs[0]=(RedNxtWd,WPs[0][1])\n NewWP=(CumWC,WP[1])\n else:\n NewWP=WP\n NewWPs.append(NewWP)\n \n return NewWPs\n\ndef split_accordingly(Tag,WC):\n NewTag=[]\n Cnts=[ len(WP[0]) for WP in Tag ]\n Cum=0\n for (Cntr,Cnt) in enumerate(Cnts):\n Wd=WC[Cum:Cum+Cnt]; P=Tag[Cntr][1]\n NewTag.append((Wd,P))\n Cum=Cum+Cnt\n return NewTag\n\ndef purge_bad_wps(WC_WPs):\n# WC_WPs= [ WC_WP for WC_WP in WC_WPs if len(WC_WP)==2 d WC_WP[0]!='/' ]\n for WC_WP in WC_WPs:\n WPs=WC_WP[1]\n if ('','sp') in WPs:\n WPs[WPs.index(('','sp'))]=(',','sp')\n if ('',) in WPs:\n WPs.remove(('',))\n return WC_WPs\n# if (',','sp') in WPs:\n# WPs[WPs.index(('','sp'))]=('/','sp')\n\ndef normalisation_ok_p(WC_WPs,NWPs):\n OrgStr=''.join([ WC_WP[0] for WC_WP in WC_WPs ])\n NewStr=''.join([ WP[0] for WP in myModule.flatten_list(NWPs) ])\n if OrgStr==NewStr:\n Bool=True\n else:\n Bool=False\n return Bool\n\n# space marking-------------------------------------\n\n\ndef mark_spaces(NTags):\n Str=''\n WPs=myModule.flatten_list(NTags)\n\n LstWP=('','')\n # and this is WC level\n for (Cntr,NTag) in enumerate(NTags):\n# if Debug: sys.stdout.write('Up to WC'+str(Cntr+1))\n # this part is to determine whether the space between two wcs is optional or not\n # so, the first one is ignored, \n if Cntr==0:\n Space=''\n # and from the second one, the top tag of it and the last tag of the previous one are compared\n else:\n if optional_space_p(LstWP,NTag[0]):\n Space=' '+ZWNJ\n else:\n Space=' '\n \n # and this is the intra-WC part\n WCStr=mark_intrawc(NTag)\n # then mark the space plus the WC accordingly\n Str=Str+Space+WCStr\n# if Debug and Cntr==len(NTags)-1:\n# sys.stdout.write('Marked: '); print([Str])\n # the last one of the current tag is stored as the 'previous' tag\n LstWP=NTag[-1]\n return (Str,WPs)\n\n\ndef mark_intrawc(NTag):\n try:\n PrevWP=('','')\n for (Cntr,WP) in enumerate(NTag):\n (Wd,_)=WP\n if Cntr==0:\n Str=Wd\n elif optional_space_p(PrevWP,WP): \n #PoS.startswith('j'):\n Str=Str+ZWNJ+Wd\n else:\n Str=Str+ZWJ+Wd\n PrevWP=WP\n except:\n mark_intrawc(NTag)\n\n return Str\n\ndef optional_space_p(WP1,WP2):\n import re\n (MandCombs,OptCombs)=Combs\n (_Wd1,PoS1)=WP1; (_Wd2,PoS2)=WP2\n for MComb in MandCombs:\n if re.match(MComb[0], PoS1) and re.match(MComb[1], PoS2):\n Val=False; break\n else:\n Val=False\n for OComb in OptCombs:\n if re.match(OComb[0], PoS1) and re.match(OComb[1], PoS2):\n Val=True; break\n return Val\n\ndef split_and_glue_end(Tag,WC):\n NewTag=split_accordingly(Tag[:-2],WC)\n LstStr=WC[-2:]\n NewTag.append((LstStr,Tag[-2][1]))\n return NewTag\n\n\ndef update_bags(Bags,MkdSent,NWPs,NormP,UnmarkedSent):\n (MkdSents,UnmarkedSents,CtdWPs,CldWCs)=Bags\n if NormP:\n MkdSents.add(MkdSent)\n update_wdstats(NWPs,CtdWPs)\n process_wcs(MkdSent,NWPs,CldWCs)\n else:\n UnmarkedSents.add(UnmarkedSent)\n\n return Bags\n\n\n\ndef update_wdstats(WPs,CtdWPs):\n for WP in WPs:\n if WP in CtdWPs.keys():\n CtdWPs[WP]=CtdWPs[WP]+1\n else:\n CtdWPs[WP]=1\n\ndef process_wcs(MkdSent,NTags,CldWCs):\n for WC in MkdSent.split():\n if WC.find(ZWNJ)!=-1:\n CldWCs['opt'].add(WC)\n if WC.find(ZWJ)!=-1:\n CldWCs['mand'].add(WC)\n\n# ===== old one, this does pre-processing for each 
sentence\n\n'''\n \ndef main1(Cntr,Sent,Bags,Len):\n print('\\nSentence being processed, Index '+str(Cntr)+' (of '+str(Len)+'): '+Sent)\n \n (MkdSent,NWPs,NormP)=main_processes(Sent)\n # data updates, if normalisation goes okay\n if NormP:\n Bags=update_bags(Bags,MkdSent,NWPs)\n\n return (Bags,NormP)\n\ndef main_processes(Sent):\n if Debug: sys.stdout.write('PoS tagging...')\n WC_WPs=pos_preprocess(Sent)\n \n if Debug: print(' done'); sys.stdout.write('Now normalising...')\n (NWPs,NormP)=normalise_tags(WC_WPs)\n \n if NormP:\n if Debug: print('Now marking spaces...')\n (MkdSent,NWPs)=mark_spaces(NWPs)\n \n sys.stdout.write('Marked: '); print([MkdSent])\n else:\n MkdSent=''; NWPs=[]; UnmarkedSent=Sent\n print('Normalisation check failed for '+Sent+', skipping')\n \n return (MkdSent,NWPs,NormP)\n\n'''\n","repo_name":"yosato/mygithub","sub_path":"morphology/pos_sents.py","file_name":"pos_sents.py","file_ext":"py","file_size_in_byte":9312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13981048449","text":"from django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\n\nclass User(AbstractUser):\n \"\"\"Модель пользователя.\"\"\"\n\n email = models.EmailField(\n max_length=254,\n verbose_name='Почта')\n username = models.CharField(\n max_length=150,\n unique=True,\n verbose_name='Псевдоним')\n first_name = models.CharField(\n max_length=150,\n verbose_name='Имя')\n last_name = models.CharField(\n max_length=150,\n verbose_name='Фамилия')\n password = models.CharField(\n max_length=150)\n\n class Meta:\n verbose_name = 'Пользователь'\n verbose_name_plural = 'Пользователи'\n\n\nclass Subscribe(models.Model):\n \"\"\"Модель подписки.\"\"\"\n\n subscriber = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='subscriber',\n verbose_name='Подписчик')\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='author',\n verbose_name='Автор рецептов')\n\n class Meta:\n verbose_name = 'Подписка'\n verbose_name_plural = 'Подписки'\n constraints = [\n models.UniqueConstraint(\n fields=['subscriber', 'author'],\n name='subscriber_author'),\n ]\n","repo_name":"Gale4/foodgram-project-react","sub_path":"backend/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5972525708","text":"import random\nsand = color(230, 230, 179)\nwater = color(140, 191, 217) \nswamp = color(167, 152, 118) \nwall = color(90, 90, 90) \ninf = int(1e9+7)\n\nSAND = 1\nSWAMP = 5\nWATER = 10\nWALL = inf\n\nclass Grid:\n def __init__(self, cellSize):\n self.cellSize = cellSize\n self.shape = (width//cellSize, height//cellSize)\n self.world = [[0 for x in range(self.shape[1])] for y in range(self.shape[0])] \n self.seen = [[False for x in range(self.shape[1])] for y in range(self.shape[0])]\n self.visited = [[False for x in range(self.shape[1])] for y in range(self.shape[0])]\n self.dist = [[0 for x in range(self.shape[1])] for y in range(self.shape[0])]\n self.parent = [[None for x in range(self.shape[1])] for y in range(self.shape[0])]\n \n def buildMap (self, noiseScale, seed):\n noiseSeed(seed)\n noiseDetail(8)\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n xoff = float(i) / self.shape[0]\n yoff = float(j) / self.shape[1]\n result = noise(xoff * noiseScale, yoff * noiseScale)\n if (result <= 0.3):\n self.world[i][j] = WATER\n elif (result <= 0.4):\n 
self.world[i][j] = WALL\n elif (result <= 0.55):\n self.world[i][j] = SAND\n elif (result <= 0.65):\n self.world[i][j] = SWAMP\n else :\n self.world[i][j] = WATER\n \n def reset(self):\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n self.seen[i][j] = False\n self.visited[i][j] = False\n self.dist[i][j] = inf\n def see(self, p):\n self.seen[p[0]][p[1]] = True\n def visit(self, p):\n self.visited[p[0]][p[1]] = True\n def wasSeen(self, p):\n return self.seen[p[0]][p[1]]\n def wasVisited(self, p):\n return self.visited[p[0]][p[1]]\n\n def setDistW(self, p):\n self.dist[p[0]][p[1]] = self.world[p[0]][p[1]]\n def setDist(self, p, w):\n self.dist[p[0]][p[1]] = w\n\n def setParent(self, p, par):\n self.parent[p[0]][p[1]] = par\n \n def getPath(self, src, dst):\n path = []\n dist = grid.world[src[0]][src[1]]\n while dst != src:\n path.append(dst)\n dist += grid.world[dst[0]][dst[1]]\n dst = grid.parent[dst[0]][dst[1]]\n path.append(src)\n return (dist, path)\n\n delta = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n \n def is_within_map_bounds(self, p):\n return p[0] >= 0 and p[1] >= 0 and p[0] < self.shape[0] and p[1] < self.shape[1]\n \n def is_walkable(self, p):\n return self.world[p[0]][p[1]] != WALL\n def walkablePosition(self):\n pos = (0, 0)\n while True:\n pos = (random.randint(0, self.shape[0]-1), random.randint(0, self.shape[1]-1))\n if (self.is_walkable(pos) and self.world[pos[1]][pos[1]] != WATER):\n return pos\n \n def adjacent(self, p):\n neighbors = []\n for (dx, dy) in self.delta:\n np = (p[0] + dx, p[1] + dy)\n if (self.is_within_map_bounds(np) and self.is_walkable(np)):\n neighbors.append(np)\n return neighbors\n\n def getPath(self, src, dst):\n path = []\n dist = self.world[src[0]][src[1]]\n while dst != src:\n path.append(dst)\n dist += self.world[dst[0]][dst[1]]\n dst = self.parent[dst[0]][dst[1]]\n path.append(src)\n return (dist, path)\n \n def cellCenter(self, pos):\n return PVector((pos[0] + 0.5)*self.cellSize, (pos[1] + 0.5)*self.cellSize)\n\n def displayCell(self, p, cellColor):\n noStroke()\n fill(cellColor)\n rect(p[0] * self.cellSize, p[1] * self.cellSize, self.cellSize, self.cellSize)\n def highlightCell(self, p, borderColor):\n stroke(borderColor)\n strokeWeight(2);\n noFill()\n rect(p[0] * self.cellSize, p[1] * self.cellSize, self.cellSize, self.cellSize)\n \n def displaySeen(self):\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n p = (i, j)\n if (self.wasVisited(p)):\n self.displayCell(p, color(220,20,60, 50))\n elif self.wasSeen (p):\n self.displayCell(p, color(220,20,60,99))\n self.highlightCell(p, color(220,20,60))\n \n def display(self):\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n p = (i, j)\n if (self.world[i][j] == SAND):\n self.displayCell(p, sand)\n elif (self.world[i][j] == SWAMP):\n self.displayCell(p, swamp)\n elif (self.world[i][j] == WATER):\n self.displayCell(p, water)\n else:\n self.displayCell(p, wall)\n","repo_name":"mhco0/search-algorithms-view","sub_path":"run/Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"71438133385","text":"from flask import Flask, request, redirect\nimport random\n\napp = Flask(__name__)\n\nnextId = 4\ntopics = [\n {'id': 1, 'title': 'html', 'body': 'html is ...'},\n {'id': 2, 'title': 'css', 'body': 'css is ...'},\n {'id': 3, 'title': 'javascript', 'body': 'javascript is ...'},\n]\n\ndef template(contents, content = \"
<h2>Welcome</h2>Hello, Web\"):\n return f'''\n <html>\n <body>\n <h1><a href=\"/\">WEB</a></h1>\n <ol>\n {contents}\n </ol>\n {content}\n </body>\n </html>\n '''\n\ndef getContents():\n liTags = \" \"\n\n for topic in topics:\n liTags += f'<li><a href=\"/read/{topic[\"id\"]}/\">{topic[\"title\"]}</a></li>'\n\n return liTags\n\n\n@app.route('/')\ndef index():\n return template(getContents())\n\n@app.route('/read/<int:id>/') # the id arrives as a string, so it must be cast with the int: converter\ndef read(id):\n\n title = \" \"\n body = \" \"\n for topic in topics:\n if id == topic['id']:\n title = topic['title']\n body = topic['body']\n break # !!\n\n return template(getContents(), f\"<h2>{title}</h2>{body}\")\n\n@app.route('/create/', methods=['POST', 'GET'])\ndef create():\n # method == GET passes data openly via the URL (to view) / POST sends it discreetly in the request body (to modify)\n if request.method == 'GET':\n content = '''\n <form action=\"/create/\" method=\"POST\">\n <p><input type=\"text\" name=\"title\" placeholder=\"title\"></p>\n <p><textarea name=\"body\" placeholder=\"body\"></textarea></p>\n <p><input type=\"submit\" value=\"create\"></p>\n </form>
    \n '''\n return template(getContents(), content)\n\n elif request.method == 'POST':\n global nextId # 전역변수를 건드리기 위해서는, 사용하기 전에 global과 함께 선언해 줘야 한다.\n title = request.form['title']\n body = request.form['body']\n newTopic = {'id': nextId, 'title': title, 'body': body} # newTopic이라는 새 엘리먼트를 만들고\n topics.append(newTopic) # topics 리스트에 추가한다.\n url = '/read/'+str(nextId) + '/' # nextID는 int라서, str으로 변경하기!\n nextId = nextId + 1\n return redirect(url)\n\napp.run(port=5001, debug=True)\n\n","repo_name":"jangseoyoung98/Practice_Flask","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40775022098","text":"from __future__ import unicode_literals\nimport re\nimport urlquick\n\nfrom codequick import Resolver\n\nURL_LIVES = 'http://www.medi1tv.com/ar/live.aspx'\n\n\n@Resolver.register\ndef get_live_url(plugin, item_id, **kwargs):\n\n resp = urlquick.get(URL_LIVES)\n pattern = r\"Medi1TV\\ %s[\\S\\s]*file\\:\\ \\'(.*\\.m3u8.*)\\'[\\S\\s]*Medi1V_%s.jpg\" % (item_id, item_id.lower())\n manifesturl = re.compile(pattern).findall(resp.text)[0]\n finalurl = ''\n if manifesturl.startswith('https'):\n finalurl = manifesturl\n else:\n finalurl = 'https:' + manifesturl\n return finalurl\n","repo_name":"prf2/test","sub_path":"repo/plugin.video.catchuptvandmore/resources/lib/channels/ma/medi1.py","file_name":"medi1.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25212148314","text":"from .ninjs_formatter import NINJSFormatter\nimport superdesk\nimport elasticapm\n\n\nclass NewsroomNinjsFormatter(NINJSFormatter):\n name = \"Newsroom NINJS\"\n type = \"newsroom ninjs\"\n\n def __init__(self):\n self.format_type = \"newsroom ninjs\"\n self.can_preview = False\n self.can_export = False\n self.internal_renditions = [\"original\", \"viewImage\", \"baseImage\"]\n\n @elasticapm.capture_span()\n def _format_products(self, article):\n \"\"\"\n Return a list of API product id's that the article matches.\n\n :param article:\n :return:\n \"\"\"\n result = superdesk.get_resource_service(\"product_tests\").test_products(article)\n return [{\"code\": p[\"product_id\"], \"name\": p.get(\"name\")} for p in result if p.get(\"matched\", False)]\n\n @elasticapm.capture_span()\n def _transform_to_ninjs(self, article, subscriber, recursive=True):\n ninjs = super()._transform_to_ninjs(article, subscriber, recursive)\n\n if article.get(\"ingest_id\") and (\n article.get(\"auto_publish\") or (article.get(\"extra\") or {}).get(\"publish_ingest_id_as_guid\")\n ):\n ninjs[\"guid\"] = article.get(\"ingest_id\")\n if article.get(\"ingest_version\"):\n ninjs[\"version\"] = article[\"ingest_version\"]\n\n ninjs[\"products\"] = self._format_products(article)\n\n if article.get(\"assignment_id\"):\n assignment = superdesk.get_resource_service(\"assignments\").find_one(req=None, _id=article[\"assignment_id\"])\n if assignment is not None:\n if assignment.get(\"coverage_item\"):\n ninjs.setdefault(\"coverage_id\", assignment[\"coverage_item\"])\n if assignment.get(\"planning_item\"):\n ninjs.setdefault(\"planning_id\", assignment[\"planning_item\"])\n\n if article.get(\"refs\"):\n ninjs[\"refs\"] = article[\"refs\"]\n\n return 
ninjs\n","repo_name":"superdesk/superdesk-core","sub_path":"superdesk/publish/formatters/ninjs_newsroom_formatter.py","file_name":"ninjs_newsroom_formatter.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"783437780","text":"class Constants:\n \"\"\"docstring for Constants\"\"\"\n MAIN_WINDOW_SIZE = MAIN_WINDOW_WIDTH, MAIN_WINDOW_HEIGHT = 800, 600\n\n class Color:\n BLACK = (0, 0, 0)\n WHITE = (255, 255, 255)\n BLUE = (0, 0, 255)\n\n class ObjectType:\n Null = 0\n Pad = 1\n Ball = 2\n Player = 3\n\n class Paths:\n Spritesheet = \"pyGame/content/tiles_spritesheet_platformer.png\"\n","repo_name":"mbobcik/Interesting_Samples","sub_path":"pyGame/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42718514898","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = \"https://search.naver.com/search.naver?sm=tab_hty.top&where=news&query=%EA%B3%A1%EC%84%B1+%EB%82%98%ED%99%8D%EC%A7%84&oquery=%EA%B3%A1%EC%84%B1+%EB%82%98%ED%99%8D%EC%A7%84&tqi=TFt3wdpySD0ssvkgAFCssssstbh-278429\"\n\nheader = {\n 'User-Agent': 'Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; ko-KR))',\n}\n\ntry:\n req = requests.get(url, headers=header)\n soup = BeautifulSoup(req.content, 'html.parser')\n data = soup.find('div', class_='title_desc all_my').text\n data = data[7:-1]\n data = data.replace(',', '')\n print(data)\nexcept Exception as e:\n print(e)","repo_name":"ok-data/movie-crawler","sub_path":"Naver_clean_crawling.py","file_name":"Naver_clean_crawling.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1381002541","text":"\nimport pandas as pd\n\n#################################################\n# define major topic codes\n#################################################\n\n# major topic codes for loop (FOR NYT!!!)\nmajortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]\n\n#########################################################################\n# create final summary analysis\n\ndata = pd.read_csv(\"./Data_NYT_clean_SPARK_START_sim.csv\", sep=';')\ndata['majortopic'].loc[data['majortopic']>23] = 100\nprint(data.groupby(['majortopic']).count())\n\nh_dict = {0:\"sim1_and_sim2_to_sim3\",1:\"sim2_and_sim3_to_sim1\",2:\"sim1_and_sim3_to_sim2\"}\n\nfor g in range(10):\n g = g+1\n for h in range(3):\n for i in range(g):\n for j in range(g):\n df = pd.read_csv(f\"./ML1_workflow_on_NYT_x10_{h_dict[h]}/NYT_round1_sample{i+1}_svm{j}.csv\")\n df = df.sort_values(by=['doc_id'])\n df = df.reset_index(drop=True)\n #print(df.head())\n if i == 0 and j == 0:\n df_idf = df\n else:\n df_lemma = df_idf.iloc[:,1:].add(df.iloc[:,1:])\n df_idf = pd.concat([df_idf[['doc_id']], df_lemma], axis=1)\n #print(df_idf.head())\n\n for i in majortopic_codes:\n df_idf[[\"prediction_{i}\".format(i=i)]] = df_idf[[\"prediction_{i}\".format(i=i)]].floordiv(i)\n\n df_idf[\"max_value\"] = df_idf.iloc[:,1:].max(axis = 1, numeric_only = True)\n df_idf[f\"how_many_{g*g}votes\"] = df_idf.iloc[:,:-1].isin([g*g]).sum(1)\n\n print(df_idf.shape)\n df_idf = df_idf.loc[df_idf[\"max_value\"]==g*g]\n print(df_idf.shape)\n df_idf = df_idf.loc[df_idf[f\"how_many_{g*g}votes\"]==1]\n print(df_idf.shape)\n\n df_idf = df_idf.drop(['max_value', f'how_many_{g*g}votes'], axis=1)\n\n 
print(df_idf.head())\n\n for i in majortopic_codes:\n df_idf[[\"prediction_{i}\".format(i=i)]] = df_idf[[\"prediction_{i}\".format(i=i)]].floordiv(g*g)\n\n print(df_idf.head())\n\n for i in majortopic_codes:\n df_idf[[\"prediction_{i}\".format(i=i)]] = df_idf[[\"prediction_{i}\".format(i=i)]]*i\n\n\n df_idf[\"verdict_idf\"] = df_idf.iloc[:,1:].sum(1)\n\n df_idf = df_idf[[\"doc_id\", \"verdict_idf\"]]\n #print(df_idf)\n\n\n # merge all onto data\n df = data.merge(df_idf, how='inner', on='doc_id')\n df = df.fillna(0)\n df[[\"verdict_idf\"]] = df[[\"verdict_idf\"]].astype(int)\n df = df.drop(columns=['text'])\n\n df.to_csv(f\"NYT_round1_results_x{g*g}_{h_dict[h]}.csv\", index=False)\n","repo_name":"poltextlab/nyt_hybrid_classification_workflow","sub_path":"spark_cluster/03_SVM_workflow_on_NYT_x10/NYT_final_analysis_all_tables.py","file_name":"NYT_final_analysis_all_tables.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10495712454","text":"# Essentials\n# Hello, world!\n# Output\nprint(\"Hello, world\")\n# Input\ni1 = input(\"What is your name?\")\ni2 = int(input(\"How old are you?\"))\ni3 = bool(int(input(\"Are you married\")))\nprint(\"\\nStatus:\\nName: \", i1, \"\\nAge: \", i2, \"\\nMarried: \", i3, \"\\n\")\n# Loop\na = 0\nwhile a < 10:\n a = a + 1\n print(a)\n# Conditional statments\nn = int(input(\"Number? \"))\nif n < 0:\n print(\"The number is negative.\")\nelse:\n print(\"The number is positive.\")\n\nn = int(input(\"How many times? \"))\nfor x in range(1, 10):\n print(\"Processing\", x, \"...\")\n for k in range(1, 10000 * x):\n pass\n\n# Guess number\nfrom random import randrange\nprint(\"\\n\")\nn = randrange(100)\ncorrect = False\nt = 1\nT = 6\nwhile (not correct) and (t <= T):\n guess = int(input(\"Guess? \"))\n if guess > n:\n print(\"Too large\\n\")\n t = t + 1\n elif guess < n:\n print(\"Too small\\n\")\n t = t + 1\n else:\n correct = True\nif correct and (t <= T):\n print(\"Your guess is correct!\")\nelif (not correct) and (t > T):\n print(\"Game over. 
The number is\", n)\n\n# Defining functions\n\n\ndef abs(n):\n if n < 0:\n n = -n\n return n\n\n\ndef hello():\n print(\"Hello, world!\")\n\n\ndef area(width, height):\n return width * height\n","repo_name":"renkun-ken/learnPython","sub_path":"src/essential.py","file_name":"essential.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23129254416","text":"from threading import Timer\nimport shlex\n\nfrom gi.repository import GObject as gobject, Gtk as gtk\nfrom gi.repository import AppIndicator3 as appindicator\n\nfrom .indicatorConfig import IndicatorConfig\nfrom .menus import IndicatorMenu\nfrom .outputReader import OutputReader\nfrom .logViewer import LogViewer\n\nAPPINDICATOR_ID = 'networktablet'\n\n\nclass NetworktabletIndicator(gobject.GObject):\n\n indicator = None\n thread = None\n\n def __init__(self):\n super().__init__()\n self.indicator = appindicator.Indicator.new(\n APPINDICATOR_ID,\n \"input-tablet\",\n appindicator.IndicatorCategory.APPLICATION_STATUS\n )\n self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE)\n\n self.config = IndicatorConfig()\n\n self.outputWindow = None\n\n self.menu = IndicatorMenu(self)\n self.indicator.set_menu(self.menu)\n\n self.textBuffer = gtk.TextBuffer()\n\n self.menu.toggleItem.set_active(self.config['enabled'])\n\n rootMenuItem = self.indicator \\\n .get_property('dbus-menu-server')\\\n .get_property('root-node')\n rootMenuItem.connect('about-to-show', self.menu.about_to_show)\n\n def handle_networktablet_output(self, output: str):\n self.debug_output(output)\n\n def quit(self):\n print('quitting')\n self.quit_networktablet()\n print('thread gone')\n gtk.main_quit()\n print('ghghagh')\n\n def networktablet_is_running(self) -> bool:\n return bool(self.thread and self.thread.is_running())\n\n def run_networktablet(self):\n if self.networktablet_is_running():\n self.thread.quit()\n self.thread = OutputReader(self)\n self.thread.start()\n Timer(1, self.menu.outputMenu.set_output).start()\n\n def quit_networktablet(self):\n if self.thread:\n self.thread.quit()\n\n def restart_networktablet(self):\n if self.networktablet_is_running():\n self.quit_networktablet()\n self.run_networktablet()\n\n def debug_output(self, output):\n if (isinstance(output, str)):\n output = output\n elif (hasattr(output, '__iter__')):\n output = ' '.join(shlex.quote(s) for s in output)+'\\n'\n print(output, end=\"\")\n end = self.textBuffer.get_end_iter()\n self.textBuffer.insert(end, output)\n\n def show_output(self):\n if self.outputWindow is not None:\n self.outputWindow.present()\n else:\n self.outputWindow = LogViewer(self, self.textBuffer)\n self.outputWindow.show_all()\n\n def dialog_closed(self):\n self.outputWindow = None\n","repo_name":"akdor1154/networktablet-indicator","sub_path":"networktablet_indicator/indicator.py","file_name":"indicator.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11141035210","text":"import os\r\nimport logging\r\nfrom pprint import pformat\r\nfrom datetime import datetime, timezone, timedelta\r\nfrom urllib.parse import urlparse\r\nfrom django.conf import settings\r\nfrom django.shortcuts import render\r\nfrom django.utils.cache import patch_vary_headers\r\n# from django.views.decorator.http import require_http_methods\r\nfrom django.http import HttpResponse, JsonResponse\r\nfrom django.views.decorators.csrf import 
csrf_exempt\r\nfrom django.views.decorators.gzip import gzip_page\r\n# from django.middleware.gzip import GZipMiddleware\r\n\r\nfrom requests import Session, Request\r\nfrom requests.exceptions import RequestException\r\n\r\nif not os.environ.get('ENABLE_LOGGING') == 'TRUE':\r\n logging.disable()\r\n\r\n\r\ndef split_set_cookies_header(set_cookie_csv):\r\n import re\r\n # assuming that at least one cookie is present in the input parameter.\r\n splitted = re.split(r',\\s(\\w+=)', set_cookie_csv)\r\n\r\n first_cookie = splitted[0]\r\n rest_of_cookies = splitted[1:]\r\n all_cookies = [first_cookie]\r\n\r\n for i in range(0, len(rest_of_cookies), 2):\r\n all_cookies.append(''.join(rest_of_cookies[i : i + 2]))\r\n\r\n return all_cookies\r\n\r\n\r\ndef ktm_time(*args):\r\n return (\r\n datetime.fromtimestamp(datetime.now().timestamp(), tz=timezone.utc)\r\n + timedelta(hours=5, minutes=45)\r\n ).timetuple()\r\n\r\nlogging.Formatter.converter = ktm_time\r\nlogger = logging.getLogger(__name__)\r\nlogger.setLevel(10)\r\n\r\n# whether the state is preserved or not depends on the call to `Request` or `Session`\r\n# specifically Request.prepare() doesnt apply state while Session.prepare_request() does\r\nSESS = Session()\r\n# !warning: when testing locally with http instead of https,\r\n# cookies that have secure flag will correctly not be sent.\r\n# this will create the subtlest of bug and the biggest of headache.\r\nif os.environ.get('fiddler') == '1':\r\n logger.info(' :::::::::::::::::::: USING Fiddler proxy for debugging ::::::::::::::::::: ')\r\n os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8866'\r\n os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8866'\r\n\r\nSUPPORTED_SCHEMES = ['https://', 'http://'] # order\r\n\r\n@csrf_exempt\r\n@gzip_page\r\ndef proxier(request, url):\r\n original_url = url\r\n # original_url_with_qparams = request.get_full_path()\r\n # view_url = request.build_absolute_uri()[:-(len(original_url_with_qparams) - len(view_path))]\r\n view_path = '/' + '/'.join(request.resolver_match.route.split('/')[:-1]) # /proxy\r\n logger.debug('\\n%sREQUEST RECEIVED%s', '-' * 30, '-' * 30)\r\n for scheme in SUPPORTED_SCHEMES:\r\n if url.startswith(scheme): break\r\n else:\r\n # Default to https\r\n url = SUPPORTED_SCHEMES[0] + url\r\n\r\n url_scheme, fallback_host, fallback_path , _, _, _ = urlparse(url)\r\n # convert to url for appending instead of passing as params cuz \")\r\n sys.exit(0)\r\n excel_name = sys.argv[1]\r\n if not exists(excel_name):\r\n print(\"Missing input file\")\r\n sys.exit(0)\r\n config_file = sys.argv[2]\r\n if not exists(config_file):\r\n print(\"Missing config file\")\r\n sys.exit(0)\r\n if not os.path.isdir(sys.argv[3]):\r\n print(\"Please provide existing directory\")\r\n sys.exit(0)\r\n\r\n # reading the excel\r\n expanded_df = pd.read_excel(excel_name, engine='openpyxl')\r\n file_location = sys.argv[3]\r\n\r\n # getting column names to add to the new files that will be created\r\n header = expanded_df.columns.values.tolist()\r\n\r\n # creating an array to store all the row indexes I will be deleting from example_expanded.xlsx\r\n rows_to_delete = [-1]\r\n\r\n filenames = list()\r\n\r\n # reading data_config_file.txt:\r\n with open(config_file, 'r') as f:\r\n # Set debug mode\r\n global DEBUG_MODE\r\n DEBUG_MODE = \"DEBUG_MODE ON\" in f.read()\r\n global DEBUG_DEEP\r\n DEBUG_DEEP = \"DEBUG_DEEP ON\" in f.read()\r\n\r\n # Reset file pointer to initial position\r\n f.seek(0)\r\n\r\n # iterating through all the lines in data_config_file.txt\r\n for line in f:\r\n 
if not line.startswith(\"#\") and line.strip():\r\n special_treatment = False\r\n and_treament = False\r\n print(line.replace(\"\\n\",\"\"))\r\n # separating each argument of the line in different variables\r\n arg_list = line.split(\"|\")\r\n\r\n # get output file & column_to_search from args list and remove spaces from the end and the start of the strings\r\n output_file = arg_list.pop(0).strip()\r\n column_to_search = arg_list.pop(0).strip()\r\n if column_to_search.startswith(\"&\"):\r\n and_treament = True\r\n column_to_search = column_to_search[1:]\r\n #remove spaces from the end and the start of the keys to search strings\r\n arg_list = [arg.strip() for arg in arg_list]\r\n arg_list[-1] = arg_list[-1].replace(\"\\n\",\"\") \r\n for entry in arg_list:\r\n if entry.startswith(\"!\"):\r\n special_treatment = True\r\n \r\n output_file_path = os.path.join(file_location, output_file)\r\n\r\n print(\"Saving on file |\" + output_file_path + \"|\")\r\n if DEBUG_MODE:\r\n print(\"Searching on column |\" + column_to_search + \"| for |\" + \"|\".join(arg_list) + \"|\")\r\n\r\n # checking if file already exists, if it does, the dataframe I will use will be from the already existing xlsx file\r\n # if not, create a new dataframe\r\n if output_file_path in filenames:\r\n if special_treatment == True:\r\n read_df = pd.read_excel(output_file_path, engine='openpyxl')\r\n rows_to_delete = append_records(read_df, header, output_file_path, column_to_search, arg_list, rows_to_delete,\"\",expanded_df,\"not\")\r\n print(rows_to_delete)\r\n elif and_treament == True:\r\n read_df = pd.read_excel(output_file_path, engine='openpyxl')\r\n rows_to_delete = append_records(read_df, header, output_file_path, column_to_search, arg_list, rows_to_delete,\"\",expanded_df,\"and\")\r\n print(rows_to_delete)\r\n else:\r\n read_df = pd.read_excel(output_file_path, engine='openpyxl')\r\n rows_to_delete = append_records(expanded_df, header, output_file_path, column_to_search, arg_list, rows_to_delete, read_df,\"\",\"or\")\r\n print(rows_to_delete)\r\n\r\n \r\n # filenames_column.append(output_file_path + column_to_search)\r\n else:\r\n expanded_delete.delete_rows(excel_name, rows_to_delete)\r\n expanded_df = pd.read_excel(excel_name, engine='openpyxl')\r\n rows_to_delete = [-1]\r\n rows_to_delete = append_records(expanded_df, header, output_file_path, column_to_search, arg_list, rows_to_delete,\"\",\"\",\"\")\r\n if rows_to_delete != []:\r\n filenames.append(output_file_path)\r\n print(rows_to_delete)\r\n\r\n # filenames_column.append(output_file_path + column_to_search)\r\n\r\n\r\n # delete the rows (goes to expanded_delete.py file)\r\n expanded_delete.delete_rows(excel_name, rows_to_delete)\r\n \r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"ejbest/some","sub_path":"processScans3.py","file_name":"processScans3.py","file_ext":"py","file_size_in_byte":9579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73286811466","text":"import os\nimport sys\nimport pdb\nimport bpy\nimport numpy as np\nimport addon_utils\nfrom .decorators import forall\n\n# correspondance of bridge elements to ifc classes\nifc_classes = { 'traverse': 'IfcRoof',\n 'piedroit': 'IfcColumn',\n 'mur':'IfcWall',\n 'gousset': 'IfcSlab',\n 'corniche': 'IfcPlate',\n 'ground': 'IfcFooting'\n }\n\n\nif sys.platform == 'darwin':\n blenderbim_path = 'addons/blenderbim-230506-py310-macosm1.zip'\nelif sys.platform == 'linux':\n blenderbim_path = 'addons/blenderbim-230504-py310-linux.zip' 
#'addons/blenderbim-230304-py310-linux.zip'\n\n\ndef enable_blenderBIM():\n \"\"\"\n enable blenderBIM if it is not enabled, install it and then enable it\n if it is not installed\n \"\"\"\n # if addon is enabled, do nothing\n if 'blenderbim' in [ad.module for ad in bpy.context.preferences.addons]:\n return\n blenderbim = [mod for mod in addon_utils.modules() if mod.bl_info['name'].lower()=='blenderbim']\n if blenderbim:\n # if addon is installed but non enabled, enable it\n bpy.ops.preferences.addon_enable(module='blenderbim')\n else:\n bpy.ops.preferences.addon_install(filepath=os.path.abspath(blenderbim_path))\n bpy.ops.preferences.addon_enable(module='blenderbim')\n # save user preferences\n bpy.ops.wm.save_userpref()\n\n\n@forall\ndef assign_ifc_classes(object):\n \"\"\"\n assign an ifc class to an object, based on the collection it belongs to\n and the ifc_classes dictionary defined above\n \"\"\"\n if object.users_collection[0].name not in ifc_classes.keys():\n return\n # deselect possibly selected objects\n [obj.select_set(False) for obj in bpy.data.objects]\n # select object\n object.select_set(True)\n # assign ifc class\n bpy.ops.bim.assign_class(ifc_class=ifc_classes[object.users_collection[0].name])\n # move object to IFC storey\n bpy.data.collections['IfcBuildingStorey/My Storey'].objects.link(object)\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n# ----------------------------DEBUGGING-----------------------------\n# for attr in dir(variable):\n# print('{} : {}'.format(attr, getattr(variable, attr)))\n# ------------------------------------------------------------------\n","repo_name":"tati-/AI_assisted_bridge_inspection","sub_path":"src/modules/ifc_utils.py","file_name":"ifc_utils.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16945512008","text":"# Python Fibonacci series Program using While Loop\n\n# Fibonacci series will start at 0 and travel upto below number\nNumber = int(input(\"\\nPlease Enter the Range Number: \"))\n\n# Initializing First and Second Values of a Series\ni = 0\nFirst_Value = 0\nSecond_Value = 1\n\n# Find & Displaying Fibonacci series\nwhile (i < Number):\n if (i <= 1):\n Next = i\n else:\n Next = First_Value + Second_Value\n First_Value = Second_Value\n Second_Value = Next\n print(Next)\n i = i + 1\n\n\n # ? Remove Reptatd Elmet\n\nn = int(input(\n \"How many 'Elements' you want to store?\"\n))\nlst = []\nfor item in range(n):\n lst.append(input())\nprint(lst)\n\nlst = list(dict.fromkeys(lst))\nprint(lst)\n","repo_name":"nayneshrathod/imrjalgoan","sub_path":"Program_no_4.py","file_name":"Program_no_4.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5802310577","text":"def oddTuples(aTup):\n '''\n aTup: a tuple\n\n returns: tuple, every other element of aTup. 
\n '''\n odds = tuple()\n index = 0\n for item in aTup:\n if index % 2 == 0:\n odds = odds + (aTup[index],)\n index += 1\n return odds\n\n\noddTuples(('I', 'am', 'a', 'test', 'tuple'))\n","repo_name":"acjr1910/cs-intro","sub_path":"unit_3/oddTuples.py","file_name":"oddTuples.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2455797328","text":"from scratchtobat import main, common, common_testing\nimport unittest\nimport os\nimport shutil\n\n\nclass MainTest(common_testing.ProjectTestCase):\n\n def assert_main_success(self, args, project_name):\n output_zip_dir = common.get_testoutput_path(\"output_zips\")\n return_val = main.scratchtobat_main(args)\n self.assertEqual(main.EXIT_SUCCESS, return_val)\n for file_ in os.listdir(output_zip_dir):\n if file_.endswith(\".zip\"):\n zip_path = os.path.join(file_)\n self.assertCorrectZipFile(zip_path, project_name)\n shutil.rmtree(output_zip_dir)\n\n def test_can_provide_catroid_project_for_scratch_link(self):\n for project_url, project_name in common_testing.TEST_PROJECT_URL_TO_NAME_MAP.iteritems():\n output_zip_dir = common.get_testoutput_path(\"output_zips\")\n self.assert_main_success([project_url, output_zip_dir], project_name)\n\n def test_can_provide_catroid_project_for_scratch_file(self):\n for project_file, project_name in common_testing.TEST_PROJECT_FILES_TO_NAME_MAP.iteritems():\n output_zip_dir = common.get_testoutput_path(\"output_zips\")\n self.assert_main_success([common.get_test_project_unpacked_file(project_file), output_zip_dir], project_name)\n\n\nif __name__ == \"__main__\":\n # import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n","repo_name":"chwt/ScratchToCatrobat","sub_path":"src/scratchtobat/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"11967250057","text":"from enum import Flag\nimport re\nimport os\nfrom itertools import cycle\n\n\n# * Si el email es valido retorna un array con [True, email], si no retorna False\ndef validar_mail(email):\n regex_mail = '^(\\w|\\.|\\_|\\-)+[@](\\w|\\_|\\-|\\.)+[.]\\w{2,3}$'\n\n if(re.search(regex_mail, email)):\n print(\"Email valido\")\n email_extraido = re.search(regex_mail, email).group()\n return [True, email_extraido]\n else:\n print(\"Email invalido\")\n return False\n\n\n# * Si el rol es valido retorna un array con [True, rol], si no retorna False\ndef validar_rol(rol):\n\n regex_rol = re.compile(\"[a-zA-Z]-\\d+-\\d{4}\") \n\n # * [a-zA-Z] hace match a un solo caracter entre a y z o A y Z seguido de un guion\n # * \\d hace match a un numero y con el mas a 1 o mas numeros seguidos de un guion\n # * \\d{4} hace match a exactamente 4 numeros\n if(re.search(regex_rol, rol)):\n rol_extraido = regex_rol.search(rol).group()\n print(\"Rol valido\")\n rol_extraido = re.search(regex_rol, rol).group()\n return [True, rol_extraido]\n else:\n print(\"Rol no valido\")\n return False\n \n\n# * Calcula el digito verificador y lo compara con el dado retorna True o False segun corresponda\ndef validar_rut(rut):\n rut = rut.replace(\".\",\"\")\n rut_split = rut.split('-')\n reversed_digits = map(int, reversed(str(rut_split[0])))\n factors = cycle(range(2, 8))\n s = sum(d * f for d, f in zip(reversed_digits, factors))\n dv = (-s) % 11\n\n if str(dv) == rut_split[1]:\n print(\"El rut es valido\")\n return True\n else:\n print(\"El rut es invalido\")\n 
return False","repo_name":"jnunezd/Validaciones","sub_path":"validations.py","file_name":"validations.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19918438135","text":"import wiringpi as wiringpi\nfrom time import sleep\nfrom signal import signal, SIGTERM, SIGHUP, pause\nfrom rpi_lcd import LCD\n\nwiringpi.wiringPiSetupGpio()\nlcd=LCD()\ndef safe_exit(signum, frame):\n exit(1)\nwiringpi.pinMode(25,0)\ncount=0\nwhile(True):\n #my_input=wiringpi.analogRead(25)\n my_input=wiringpi.digitalRead(25)\n if(my_input):\n lcd.text(\"NoAlchohol\",2)\n else:\n lcd.text(\"AlchoholDetected\",2)\n sleep(1)\ntry:\n signal(SIGTERM, safe_exit)\n signal(SIGHUP ,safe_exit)\n pause()\nexcept KeyboardInterrupt:\n pass","repo_name":"Sudhanva10/JARVIS-Human-Assistant-","sub_path":"Touch+Gas/MQ3LCD.py","file_name":"MQ3LCD.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9411830755","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch.optim as optim\nimport torch.nn as nn\nimport numpy as np\nimport torch\n#from visdom import Visdom\n\n\n# In[2]:\n\n\nclass Score(object):\n iou_thresholds = np.array([0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])\n iou_thresholds = torch.from_numpy(iou_thresholds).float()\n def iou(self,img_ture,img_pred):\n \"\"\"\n 计算两张图片的iou\n \"\"\"\n img_pred = (img_pred>0).float()#预测值中大于0的预测为正像素,像素值设为1。注意,这只是用于掩码\n i = (img_ture*img_pred).sum()\n u = (img_ture+img_pred).sum()-i\n return i / u if u != 0 else u\n def __call__(self,img_true,img_pred,device):\n \"\"\"\n 可批量计算图片的交并比\n 对于我的模型,输出img_pred的形状为(batch_size,H,W)\n 但是读取的掩码却是(batch_size,channels(1),H,W)\n 所以需要调整维度\n \"\"\"\n img_pred = torch.squeeze(img_pred)#将掩码维度调整为(batch_size,H,W)\n if img_true.device.type == 'cuda':\n self.iou_thresholds = self.iou_thresholds.to(device)\n num_imgs = len(img_true)\n scores = np.zeros(num_imgs)\n for i in range(num_imgs):\n if img_true[i].sum()==img_pred[i].sum()==0:\n scores[i]=1\n else:\n scores[i] = ((self.iou_thresholds<=self.iou(img_true[i],img_pred[i]))).float().mean()\n #计算每张图片在不同阈值下的得到的平均值\n return scores.mean()\n\n\n# In[3]:\n\nclass Train(object): \n scores=Score()\n def __init__(self,model,train_loader,value_loader,device,cerition = nn.BCEWithLogitsLoss(),lr=0.001,num_epochs=100):\n self.train_loader = train_loader\n self.value_loader = value_loader\n self.lr = lr\n self.num_epochs = num_epochs\n self.model = model\n self.device = device\n self.total_step_one_epoch = len(train_loader)\n self.optimizer = optim.Adam(self.model.parameters(),lr=lr)\n self.cerition = cerition\n self.schedulr = optim.lr_scheduler.StepLR(self.optimizer,step_size=20,gamma=0.1)\n def __call__(self):\n loss_t = []\n loss_v = []\n iou_s = []\n #vis = Visdom()\n #vis.line([[0.0,0.0]],[0.0],win=\"loss\",opts = dict(title='loss',legend=['train_loss','test_loss']))\n #vis.line([0.0],[0.0],win=\"iou\",opts=dict(title='iou_score'))\n for epoch in range(self.num_epochs):\n self.schedulr.step()\n loss_train = self.train(epoch)\n loss_test,s = self.value()\n print(epoch,':----',loss_train,'----',loss_test,'----',s)\n loss_t.append(loss_train)\n loss_v.append(loss_test)\n iou_s.append(s)\n #vis.line([s.item()],[epoch],win='iou',update='append')\n #vis.line([[loss_train,loss_test]],[epoch],win='loss',update='append')\n return loss_t,iou_s,loss_v\n def train(self,epoch):\n total_loss,nums 
= 0,0\n self.model.train()\n for i,data in enumerate(self.train_loader):\n \n img,msk = data\n msk = torch.squeeze(msk)\n num = len(img)\n nums+=num\n img = img.to(self.device)\n msk = msk.to(self.device)\n outputs = self.model(img)\n del img\n loss = self.cerition(outputs,msk)\n total_loss+=(loss.item()*num)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if i%10 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, self.num_epochs, i+1, self.total_step_one_epoch, loss.item())) \n return total_loss/nums\n def value(self):\n self.model.eval()\n total_loss,nums,s = 0,0,0\n with torch.no_grad():\n for i, (img,msk) in enumerate(self.value_loader):\n msk = torch.squeeze(msk)\n num = len(img)\n img = img.to(self.device)\n msk = msk.to(self.device)\n outputs = self.model(img)\n del img\n loss = self.cerition(outputs,msk)\n total_loss+=(loss.item()*num)\n s += (self.scores(msk,outputs,self.device)*num)\n nums+=num\n return total_loss/nums,s/nums\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"hyt1407/Kaggle_TGS","sub_path":"utils/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21746112739","text":"import pytesseract as tess\nimport glob\nimport cv2\n\ntess.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'\n\nfinaltxt = []\n\n# for a in range(1, 88):\n\n\nfor im in glob.glob(\"Biopod/Cropped/*.jpg\"):\n img = cv2.imread(im)\n txt = tess.pytesseract.image_to_string(img)\n lst = txt.splitlines()\n\n for i in range(len(lst)):\n if not lst[i].strip():\n continue\n finaltxt.append(lst[i])\n\n# for z in range(len(finaltxt)):\n# print(finaltxt[z])\n\n# with open('List.txt', 'w') as f:\n# [f.write(\"%s\\n\" % item) for item in finaltxt]\n\nfulltext = '\\n'.join(finaltxt)\nwith open(\"Biopod.txt\", \"w\") as output:\n output.write(fulltext)\n print(\"successfully saved!\")\n","repo_name":"shishir2sr/ImagetoText","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"390248545","text":"# SPDX-License-Identifier: BSD-3-Clause\n\nimport json\nfrom importlib import resources\nfrom typing import Optional\nfrom pathlib import Path\n\nfrom ..core.strutils import expand_variables\nfrom ..config import (\n\tSOURCE_DIR, DLD_DIR\n)\n\n__all__ = (\n\t'load_components',\n\t'get_paths',\n\t'component_archive_path',\n\t'component_source_path'\n)\n\ndef component_archive_path(name: str, version: str, filename: str) -> Path:\n\treturn (\n\t\t(DLD_DIR / name) / f'{version}{\"\".join(Path(filename).suffixes)}'\n\t)\n\ndef component_source_path(name: str, version: str) -> Path:\n\treturn (\n\t\t(SOURCE_DIR / name) / version\n\t)\n\ndef load_components() -> dict:\n\treturn json.loads(\n\t\tresources.read_text(__name__, 'components.json')\n\t)['components']\n\n\ndef get_paths(components: Optional[dict] = None, for_all: bool = False) -> list[dict]:\n\tif components is None:\n\t\tcomponents = load_components()\n\n\tcomponent_paths = list()\n\n\tfor name, details in components.items():\n\t\tif for_all:\n\t\t\tfor ver in details['versions']:\n\t\t\t\tcomponent_paths.append({\n\t\t\t\t\t'component': name,\n\t\t\t\t\t'url': expand_variables(\n\t\t\t\t\t\tf'{details[\"url\"]}/{details[\"filename\"]}', {'VERSION': ver['version']}\n\t\t\t\t\t),\n\t\t\t\t\t'version': 
ver['version'],\n\t\t\t\t\t'archive_path': component_archive_path(name, ver['version'], details['filename']),\n\t\t\t\t\t'source_path': component_source_path(name, ver['version']),\n\t\t\t\t\t'sha512sum': ver['sha512sum']\n\t\t\t\t})\n\t\telse:\n\t\t\tcomponent_paths.append({\n\t\t\t\t'component': name,\n\t\t\t\t'url': expand_variables(\n\t\t\t\t\tf'{details[\"url\"]}/{details[\"filename\"]}', {'VERSION': details['latest']}\n\t\t\t\t),\n\t\t\t\t'version': details['latest'],\n\t\t\t\t'archive_path': component_archive_path(name, details['latest'], details['filename']),\n\t\t\t\t'source_path': component_source_path(name, details['latest']),\n\t\t\t\t'sha512sum': list(filter(\n\t\t\t\t\tlambda v: v['version'] == details['latest'],\n\t\t\t\t\tdetails['versions']\n\t\t\t\t))[0]['sha512sum']\n\t\t\t})\n\n\treturn component_paths\n","repo_name":"lethalbit/xc-build","sub_path":"xc_build/data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17912136431","text":"# LEETCODE 572: Recursive DFS For Subtree of another Tree\n# Time: O(root*subRoot) -> O(n)\n# Space: BEST O(logn) WORST O(n)\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isSubtree(self, root: Optional[TreeNode], subRoot: Optional[TreeNode]) -> bool:\n # If empty then it will be a subtree (Base Case)\n if not subRoot:\n return True\n # If there is no tree when subtree is not empty (due to previous case)\n if not root:\n return False\n # Check if they are the same trees\n if self.sameTree(root, subRoot):\n return True\n\n # If not same subtree, then go to next node (DFS)\n return (self.isSubtree(root.left, subRoot) or self.isSubtree(root.right, subRoot))\n\n\n def sameTree(self, root, subRoot):\n # Base Case 1\n if not root and not subRoot:\n return True\n # Base Case 2: If both exist and if both values are equal to each other\n if root and subRoot and root.val == subRoot.val:\n return (self.sameTree(root.left, subRoot.left) and self.sameTree(root.right, subRoot.right))\n\n return False\n","repo_name":"darshjadhav/LeetCode_HackerRank_Solutions","sub_path":"LeetCode/Blind75/Trees/SubTreeOfAnotherTree.py","file_name":"SubTreeOfAnotherTree.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40479649510","text":"import threading\nimport queue\nimport time\n\n# Define the worker function\ndef worker(task_queue):\n while True:\n task = task_queue.get()\n if task is None: # Sentinel value to exit thread\n break\n # Execute the task (for this example, just sleep)\n time.sleep(task)\n print(f\"Completed task of {task} seconds.\")\n task_queue.task_done()\n\n# Create the task queue\ntasks = queue.Queue()\n\n# Start a fixed number of worker threads\nnum_threads = 5\nthreads = []\nfor _ in range(num_threads):\n t = threading.Thread(target=worker, args=(tasks,))\n t.start()\n threads.append(t)\n\n# Add tasks to the queue (for this example, random sleep durations)\nfor _ in range(20):\n tasks.put(1) # Sleep for 1 second\n\n# Wait for all tasks to complete\ntasks.join()\n\n# Stop the worker threads\nfor _ in range(num_threads):\n tasks.put(None)\nfor t in threads:\n t.join()\n\nprint(\"All tasks 
completed!\")\n","repo_name":"apinanyogaratnam/atom-linker","sub_path":"experimental/thread_pool.py","file_name":"thread_pool.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25710002755","text":"import os\nimport datetime\n\nfrom dotenv import load_dotenv\nfrom pymongo import MongoClient\n\nload_dotenv()\nMONGO_URI = os.environ[\"MONGO_URI\"]\n\nclient = MongoClient(MONGO_URI)\n\n# Get reference to 'bank' database\ndb = client.bank\n\n# Get reference to 'accounts' collection\naccounts_collection = db.accounts\n\nnew_account = {\n \"account_holder\": \"Linus Torvalds\",\n \"account_id\": \"MDB829001337\",\n \"account_type\": \"checking\",\n \"balance\": 50352434,\n \"last_updated\": datetime.datetime.utcnow(),\n}\n\n# Write an expression that inserts the 'new_account' document into the 'accounts' collection.\nresult = accounts_collection.insert_one(new_account)\n\ndocument_id = result.inserted_id\nprint(f\"_id of inserted document: {document_id}\")\n\nclient.close()\n","repo_name":"herysantos/mongodb-python-best-practices","sub_path":"2_insert_single.py","file_name":"2_insert_single.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6596861431","text":"import os\nfrom typing import List\nimport shutil\nimport json\n\n\nimport tqdm\nimport tensorflow as tf\nimport numpy as np\n\n\nfrom mltk.datasets.audio.speech_commands import speech_commands_v2\nfrom mltk.core.preprocess.audio.audio_feature_generator import AudioFeatureGeneratorSettings\nfrom mltk.core.preprocess.utils import tf_dataset\nimport mltk.core.preprocess.utils.audio as audio_utils\nfrom mltk.core import load_tflite_model\nfrom mltk.core import TfliteModelParameters\nfrom mltk.utils.python import install_pip_package\n\n\ninstall_pip_package('noisereduce')\nimport noisereduce\n\n\ndef clean_dataset(\n model:str,\n classes:List[str],\n dst_dir:str=None\n):\n tflite_model = load_tflite_model(model)\n tflite_parameters = TfliteModelParameters.load_from_tflite_model(tflite_model)\n frontend_settings = AudioFeatureGeneratorSettings(**tflite_parameters)\n tflite_model_classes = tflite_parameters['classes']\n\n src_dataset_dir = speech_commands_v2.load_data()\n dst_dataset_dir = dst_dir or f'{src_dataset_dir}/_cleaned'\n\n class_counts = {}\n features_ds, labels_ds = tf_dataset.load_audio_directory(\n directory=src_dataset_dir,\n classes=classes,\n return_audio_data=False,\n class_counts=class_counts,\n list_valid_filenames_in_directory_function=speech_commands_v2.list_valid_filenames_in_directory\n )\n\n ds = tf.data.Dataset.zip((features_ds, labels_ds))\n for src_path, class_id in tqdm.tqdm(ds.as_numpy_iterator(), unit='sample', total=sum(class_counts.values())):\n src_path = src_path.decode('utf-8')\n fn = os.path.basename(src_path)\n class_label = classes[class_id]\n tflite_model_class_id = tflite_model_classes.index(class_label)\n sure_dst_dir = f'{dst_dataset_dir}/{class_label}'\n unsure_dst_dir = f'{dst_dataset_dir}/_unsure/{class_label}'\n invalid_dst_dir = f'{dst_dataset_dir}/_invalid/{class_label}'\n\n os.makedirs(sure_dst_dir, exist_ok=True)\n os.makedirs(unsure_dst_dir, exist_ok=True)\n os.makedirs(invalid_dst_dir, exist_ok=True)\n\n sample, sr = audio_utils.read_audio_file(src_path, return_sample_rate=True)\n\n sample = noisereduce.reduce_noise(\n y=sample,\n sr=sr,\n stationary=True\n )\n\n if sr != 
frontend_settings.sample_rate_hz:\n sample = audio_utils.resample(sample, orig_sr=sr, target_sr=frontend_settings.sample_rate_hz)\n\n # Adjust the audio clip to the length defined in the frontend_settings\n out_length = int((frontend_settings.sample_rate_hz * frontend_settings.sample_length_ms) / 1000)\n adjusted_sample = audio_utils.adjust_length(\n sample,\n out_length=out_length,\n trim_threshold_db=40,\n offset=0\n )\n\n spectrogram = audio_utils.apply_frontend(\n sample=adjusted_sample,\n settings=frontend_settings,\n dtype=tflite_model.outputs[0].dtype\n )\n # The output spectrogram is 2D, add a channel dimension to make it 3D:\n # (height, width, channels=1)\n spectrogram = np.expand_dims(spectrogram, axis=-1)\n spectrogram = np.expand_dims(spectrogram, axis=0)\n\n preds = tflite_model.predict(spectrogram, verbose=False, y_dtype=np.float32)[0]\n pred_class_id = np.argmax(preds)\n pred = preds[pred_class_id]\n\n if pred_class_id == tflite_model_class_id:\n if pred > .95:\n shutil.copy(src_path, f'{sure_dst_dir}/{fn}')\n else:\n shutil.copy(src_path, f'{unsure_dst_dir}/{fn}')\n else:\n shutil.copy(src_path, f'{invalid_dst_dir}/{fn}')\n\n\ndef list_valid_samples():\n classes = ['on', 'off', 'left', 'right', 'up', 'down', 'stop', 'go']\n original_dataset_dir = speech_commands_v2.load_data()\n cleaned_dataset_dir = speech_commands_v2.load_clean_data()\n\n invalid_samples = {}\n for class_label in classes:\n valid_samples = list(os.listdir(f'{cleaned_dataset_dir}/{class_label}'))\n invalid_samples[class_label] = []\n for fn in os.listdir(f'{original_dataset_dir}/{class_label}'):\n if fn not in valid_samples:\n invalid_samples[class_label].append(fn)\n\n valid_path = os.path.dirname(speech_commands_v2.__file__) + '/invalid_samples.py'\n with open(valid_path, 'w') as f:\n f.write('# This file was auto-generated\\n\\n')\n f.write('# This contains invalid samples for the following classes:\\n')\n for class_label in classes:\n f.write(f'# {class_label}\\n')\n\n f.write('\\nINVALID_SAMPLES = ')\n json.dump(invalid_samples, f, indent=2)\n\n\nif __name__ == '__main__':\n # clean_dataset(\n # 'keyword_spotting_on_off_v3',\n # classes=('on', 'off')\n # )\n list_valid_samples()","repo_name":"SiliconLabs/mltk","sub_path":"mltk/datasets/audio/speech_commands/clean_dataset.py","file_name":"clean_dataset.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"81"} +{"seq_id":"35260390280","text":"from django.db import models\r\nfrom django.contrib.auth.models import User\r\n# from PIL import Image\r\n\r\n\r\nclass User_Profile(models.Model):\r\n\r\n ROLE_OPTIONS = (\r\n ('animator', 'Animator'),\r\n ('scriptwriter', 'Scriptwriter'),\r\n ('audio engineer', 'Audio Engineer'),\r\n ('graphic design', 'Graphic Design'),\r\n ('videographer', 'Videographer'),\r\n ('video editor', 'Video Editor'),\r\n ('none', 'None')\r\n )\r\n\r\n COMMITMENT_LEVEL = (\r\n ('casual', 'Casual'),\r\n ('side project', 'Side Project'),\r\n ('1 priority', '#1 Priority')\r\n )\r\n\r\n\r\n user = models.OneToOneField(User, on_delete=models.CASCADE)\r\n user_bio = models.TextField(max_length=320, default=\"\", blank=True)\r\n user_yoe = models.IntegerField(default=0)\r\n user_role = models.CharField(max_length=80, choices=ROLE_OPTIONS, default='none')\r\n user_commitment_level = models.CharField(max_length=80, choices=COMMITMENT_LEVEL, default='Casual')\r\n # image = models.ImageField(default='default.jpg', upload_to='profile_pics')\r\n\r\n \r\n def 
__str__(self):\r\n        return f'{self.user.username} Profile'\r\n    \r\n\r\n","repo_name":"Brooks-Mitchell/creative_connect","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"27016710606","text":"\"\"\"\nImplement a function that adds two numbers together and returns their\nsum in binary. The conversion can be done before, or after the addition.\n\nThe binary number returned should be a string.\n\"\"\"\n\ndef add_binary(a, b):\n\n    def decimalToBinary(n):\n        # Make blank str for binary x.\n        x = \"\"\n        # Loop, take remainder (1 or 0) and concat to x until n < 1.\n        while n > 1:\n            x += str(n % 2)\n            n = n // 2\n        # When loop not true concat final remainder (1 or 0) to x.\n        else:\n            x += str(n % 2)\n        # Reverse string (and keep the result) to read binary correctly.\n        x = x[::-1]\n        return x\n\n    return decimalToBinary(a + b)\n\n# Cheat and use built in function\n# def add_binary(a, b):\n#     # Returns string with binary conversion and leading \"0b\" removed.\n#     return bin(a + b)[2:]\n\n\n\nprint(add_binary(1,1),\"10\")\nprint(add_binary(0,1),\"1\")\nprint(add_binary(1,0),\"1\")\nprint(add_binary(2,2),\"100\")\nprint(add_binary(51,12),\"111111\")\n","repo_name":"ConstantArguments/CodeWars","sub_path":"binary_sum.py","file_name":"binary_sum.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"11323479258","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 24 18:21:35 2022\n\n@author: Sneha Sree\n\"\"\"\n\n\n \nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg_rgb = cv2.imread('image.png')\nimg_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\ntemplate1 = cv2.imread('template1.png',0)\ntemplate2=cv2.imread('template2.png',0)\ntemplate3=cv2.imread('template3.png',0)\nh1, w1 = template1.shape[::]\nh2, w2 = template2.shape[::]\nh3, w3 = template3.shape[::]\n\nres1 = cv2.matchTemplate(img_gray, template1, cv2.TM_CCOEFF_NORMED)\nres2= cv2.matchTemplate(img_gray, template2, cv2.TM_CCOEFF_NORMED)\nres3= cv2.matchTemplate(img_gray, template3, cv2.TM_CCOEFF_NORMED)\n\nthreshold1 = 0.95 #Pick only values above 0.95 since for TM_CCOEFF_NORMED, larger values = good fit.\n\nloc1 = np.where( res1 >= threshold1) \n#Outputs 2 arrays. Combine these arrays to get x,y coordinates - take x from one array and y from the other.\n\n#ZIP function is an iterator of tuples where first item in each iterator is paired together,\n#then the second item and then third, etc. \n\nfor pt in zip(*loc1[::-1]): #-1 to swap the values as we assign x and y coordinate to draw the rectangle. \n    #Draw rectangle around each object. We know the top left (pt), draw rectangle to match the size of the template image.\n    cv2.rectangle(img_rgb, pt, (pt[0]+w1 , pt[1]+h1), (0, 0,255), 1) #red rectangles with thickness 1. 
\n \nthreshold2 = 0.95\nloc2 = np.where( res2 >= threshold2) \n\nfor pt in zip(*loc2[::-1]): \n cv2.rectangle(img_rgb, pt, (pt[0] + w2, pt[1] + h2), (0, 0, 255), 1) \n \nthreshold3 = 1.0\nloc3 = np.where( res3 >= threshold3) \n\nfor pt in zip(*loc3[::-1]): \n cv2.rectangle(img_rgb, pt, (pt[0] + w3, pt[1] + h3), (0, 0, 255), 1) \n\n#cv2.imwrite('images/template_matched.jpg', img_rgb)\ncv2.imshow(\"Matched image\", img_rgb)\ncv2.waitKey()","repo_name":"arihantjain124/PDF_Parser","sub_path":"imagingTrackPython/templateMatching.py","file_name":"templateMatching.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7214173384","text":"from nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom nltk import download\n\nclass SentimentAnalysis():\n\n def __init__(self):\n self.sid = SentimentIntensityAnalyzer()\n\n def get_score(self, lyrics):\n # 1 = Positive\n # 0 = Neutral\n # -1 = Negative\n # Values are continuous in the range -1 1\n comp = self.sid.polarity_scores(lyrics)\n print(comp)\n comp = comp['compound']\n return comp\n\n def get_sentiment(self, lyrics):\n score = self.get_score(lyrics=lyrics)\n if score >= 0.5:\n return 1\n elif score > -0.5 and score < 0.5:\n return 0\n else:\n return -1\n\n\nif __name__ == '__main__':\n # run once\n # download('vader_lexicon')\n sa = SentimentAnalysis()\n lyrics = \"\"\"I wanna die.\"\"\"\n\n s = sa.get_sentiment(lyrics=lyrics)\n print(s)\n","repo_name":"aakash94/WIP","sub_path":"src/SentimentAnalysis.py","file_name":"SentimentAnalysis.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34144769271","text":"\nimport numpy as np\nfrom tensorflow.keras.utils import to_categorical\nimport matplotlib.pyplot as plt\n\n\nimport random\ndef calAccuracies_acc2letters(cm):\n nChars = cm.shape[0]\n prec = np.zeros(nChars,)\n rec = np.zeros(nChars, )\n f1 = np.zeros(nChars,)\n for i in range(nChars):\n nTP = cm[i,i]\n nFP = np.sum(cm[:,i]) - nTP\n nFN = np.sum(cm[i,:]) - nTP\n prec[i] = nTP/ (nTP + nFP)\n rec[i] = nTP / (nTP + nFN)\n f1[i] = 2 * nTP / (2*nTP+ nFP + nFN)\n return prec, rec, f1\n\n\ndef drawHistory(hist):\n fig, loss_ax = plt.subplots()\n acc_ax = loss_ax.twinx()\n\n loss_ax.plot(hist.history['loss'], 'y', label='train loss')\n loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')\n loss_ax.set_xlabel('epoch')\n loss_ax.set_ylabel('loss')\n loss_ax.legend(loc='upper left')\n\n acc_ax.plot(hist.history['accuracy'], 'b', label='train acc')\n acc_ax.plot(hist.history['val_accuracy'], 'g', label='val acc')\n acc_ax.set_ylabel('accuracy')\n acc_ax.legend(loc='lower left')\n\n plt.show()\n\ndef getTargetData(data, bRemoveSameShapeInfo):\n nSubjects = data.shape[0]\n nTrials = data.shape[1]\n nChars = data.shape[2]\n\n Y = np.zeros([nSubjects, nTrials, nChars])\n for i in range(nChars):\n if bRemoveSameShapeInfo==0:\n Y[:, :, i] = i\n else:\n if i<18:\n Y[:, :, i] = i\n elif i==18:\n Y[:, :, i] = 1\n elif i<21:\n Y[:, :, i] = i-1\n elif i==21:\n Y[:, :, i] = 4\n elif i<24:\n Y[:, :, i] = i-2\n elif i==24:\n Y[:, :, i] = 0\n elif i<29:\n Y[:, :, i] = i-3\n elif i==29:\n Y[:, :, i] = 7\n else:\n Y[:, :, i] = i-4\n return Y\n\n#학습/validation/ test로 분할\ndef spilt_data_for_exp(data, Y):\n nSubjects = data.shape[0]\n nArrayLen = data.shape[3]\n nChannels = data.shape[4]\n\n idx = np.array(range(nSubjects))\n 
print(f'---------------{idx[-1]}')\n random.shuffle(idx)\n nTrain = 19\n nVal = 0\n nTest = 2\n\n XTrain = data[idx[:nTrain],:,:,:,:]\n #XVal = data[idx[nTrain:nTrain+nVal],:,:,:,:]\n XTest = data[idx[nTrain + nVal:], :, :, :, :]\n\n YTrain= Y[idx[:nTrain]]\n #YVal = Y[idx[nTrain:nTrain+nVal]]\n YTest = Y[idx[nTrain + nVal:]]\n\n XTrain = XTrain.reshape([-1,nArrayLen, nChannels])\n #XVal = XVal.reshape([-1, nArrayLen, nChannels])\n XTest = XTest.reshape([-1, nArrayLen, nChannels])\n YTrain = YTrain.reshape([-1, ])\n #YVal = YVal.reshape([-1, ])\n YTest = YTest.reshape([-1, ])\n\n YTrain = to_categorical(YTrain)\n #YVal = to_categorical(YVal)\n YTest = to_categorical(YTest)\n\n #return XTrain, XVal, XTest, YTrain, YVal, YTest\n return XTrain, XTest, YTrain, YTest\n\n'''#학습/validation/ test로 분할\ndef spilt_data_for_exp_v2(data, Y):\n nSubjects = data.shape[0]\n nArrayLen = data.shape[3]\n nChannels = data.shape[4]\n\n #idx = np.array(range(nSubjects))\n #random.shuffle(idx)\n nTrain = 19\n\n idx = np.zeros([nSubjects,])\n idx[7] = 0\n\n XTrain = data[idx==1,:,:,:,:]\n XTest = data[7, :, :, :, :]\n\n YTrain= Y[idx==1,:]\n YTest = Y[7,:]\n\n XTrain = XTrain.reshape([-1,nArrayLen, nChannels])\n XTest = XTest.reshape([-1, nArrayLen, nChannels])\n YTrain = YTrain.reshape([-1, ])\n YTest = YTest.reshape([-1, ])\n\n YTrain = to_categorical(YTrain)\n YTest = to_categorical(YTest)\n\n return XTrain, XTest, YTrain, YTest'''\n\n\n#학습/validation/ test로 분할 (Leave one subject out)\ndef spilt_data_for_exp_Nfold(data, Y, idx_test):\n nSubjects = data.shape[0]\n nArrayLen = data.shape[3]\n nChannels = data.shape[4]\n\n bTrain = np.ones(nSubjects, )\n bTrain[idx_test] = 0\n\n idx = np.array(range(nSubjects))\n\n XTrain = data[bTrain==1,:,:,:,:]\n XTest = data[idx_test, :, :, :, :]\n\n YTrain= Y[bTrain==1]\n YTest = Y[idx_test]\n\n XTrain = XTrain.reshape([-1,nArrayLen, nChannels])\n XTest = XTest.reshape([-1, nArrayLen, nChannels])\n YTrain = YTrain.reshape([-1, ])\n YTest = YTest.reshape([-1, ])\n\n YTrain = to_categorical(YTrain)\n YTest = to_categorical(YTest)\n\n return XTrain, XTest, YTrain, YTest\n","repo_name":"kktaek/air_writing_git","sub_path":"explib.py","file_name":"explib.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6235131574","text":"from django.urls import path, re_path, include\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n    path('register/', views.register, name='register'),\r\n    path('profile/', views.profile, name='profile'),\r\n    path('login/', views.login, name='login'),\r\n    path('sign_out/', views.sign_out, name='sign_out'),\r\n\r\n    # user repath to avoid conflict with admin login\r\n    re_path('.*', views.login, name='login'),\r\n]\r\n","repo_name":"2790004/WAD_2_Group_6B_Project","sub_path":"Tournament/UserCenter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"43800817485","text":"# This problem was asked by Amazon.\n# Given a pivot x, and a list lst, partition the list into three parts.\n# •\tThe first part contains all elements in lst that are less than x\n# •\tThe second part contains all elements in lst that are equal to x\n# •\tThe third part contains all elements in lst that are larger than x\n# Ordering within a part can be arbitrary.\n# For example, given x = 10 and lst = [9, 12, 3, 5, 14, 10, 10], one partition may be [9, 3, 5, 10, 10, 12, 14]\n####\n# Using three individual lists and concatenating them seems like an obvious enough solution.\n# This solution is aimed at performing this without using the O(n) extra space.\n\n# swap numbers lower than x to their rightful place\ndef rightful_swap(arr, begin, end, x, lam):\n    while True:\n        # lam decides the objects that belong to the beginning portion of the array\n        # assuming lam is a '<' operator, 'begin' increases until a value greater than\n        # x is encountered (i.e. a value that doesn't belong in the first portion)\n        while lam(arr[begin], x):\n            begin += 1\n        # conversely, this loop identifies an element that does not belong in the latter portion\n        while not lam(arr[end], x):\n            end -= 1\n        # iterators have crossed each other\n        if begin >= end:\n            break\n        # swap elements\n        arr[begin], arr[end] = arr[end], arr[begin]\n    # this denotes the index at which the partition occurs\n    return begin\n\ndef partitioned(arr, x):\n    n = len(arr)\n    # put all elements less than x in the correct part of the array\n    new_begin = rightful_swap(arr, 0, n-1, x, lambda x, y: x < y)\n    # the index at which the partition occurs, becomes the starting index of the\n    # next partition\n    # put all elements equal to x in the correct part of the array\n    rightful_swap(arr, new_begin, n-1, x, lambda x, y: x == y)\n\n\n####\nt = [9, 12, 3, 5, 14, 10, 10]\npartitioned(t, 10)\nprint(t)\n","repo_name":"whoophee/DCP","sub_path":"143.py","file_name":"143.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"3634896874","text":"#NOMI-COSE-STATI\r\n\r\n#variabili\r\nlettera_random = \"\"\r\nnomi = \"\"\r\ncontinuare = \"s\"\r\npunteggio = 0\r\ni = 0\r\nx = 0\r\n#______________________________________________________________________________________________________________\r\n\r\n#import\r\nimport time\r\nimport random \r\n#______________________________________________________________________________________________________________\r\n\r\n#liste\r\nlettera = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\"]\r\nlunghezza_lettera = len(lettera)\r\n#______________________________________________________________________________________________________________\r\n\r\nlista_nomi_A = [\"andrea\",\"alfonso\",\"ambrogio\",\"anna\",\"anastasia\",\"angelica\",\"armando\"]\r\nlista_cose_A = 
[\"acqua\",\"aria\",\"ago\",\"album\",\"arco\",\"armadio\",\"astuccio\",\"ancora\"]\r\nlista_stati_A = [\"arabia saudita\",\"afghanistan\",\"alabama\",\"america\",\"albania\",\"alaska\",\"andorra\"]\r\n#______________________________________________________________________________________________________________\r\n\r\nlista_nomi_B = [\"baldo\",\"brambilla\",\"bartolomeo\",\"beatrice\",\"benito\",\"berardo\",\"busso\"]\r\nlista_cose_B = [\"bacchetta\",\"bagaglio\",\"balestra\",\"bandiera\",\"banco\",\"barattolo\",\"bomba\"]\r\nlista_stati_B = [\"bangladesh\",\"belgio\",\"bielorussia\",\"bosnia\",\"brasile\",\"bolivia\"]\r\n#______________________________________________________________________________________________________________\r\n\r\nlista_nomi_C = [\"christian\",\"chiara\",\"cione\",\"cristofaro\",\"camilla\",\"claudio\",\"caterina\"]\r\nlista_cose_C = [\"cacciavite\",\"chiodo\",\"calza\",\"calamita\",\"camicia\",\"chitarra\",\"campanella\",\"cintura\"]\r\nlista_stati_C = [\"california\",\"ciad\",\"cile\",\"cina\",\"canada\",\"corea\",\"croazia\"]\r\n#______________________________________________________________________________________________________________\r\n\r\nlista_nomi_D = [\"dario\",\"daniele\",\"donato\",\"daria\",\"deniso\",\"demo\",\"diaco\"]\r\nlista_cose_D = [\"dado\",\"dente\",\"dito\",\"dondolo\",\"disco\",\"diamante\",\"dipinto\",\"diedro\"]\r\nlista_stati_D = [\"dominicana\",\"danimarca\",\"delaware\",\"dani\",\"dakota\"]\r\n#______________________________________________________________________________________________________________\r\n\r\nlista_nomi_E = [\"edoardo\",\"edmundo\",\"enrico\",\"efesto\",\"esseba\",\"elena\",\"eleonora\"]\r\nlista_cose_E = [\"elastico\",\"elicottero\",\"estintore\",\"elmo\",\"elettrone\",\"esagono\",\"esofago\"]\r\nlista_stati_E = [\"ecuador\",\"egitto\",\"estonia\",\"eritrea\",\"etiopia\",\"emirati arabi\"]\r\n#______________________________________________________________________________________________________________\r\n\r\nlista_nomi_F = [\"farè\",\"francesca\",\"federico\",\"franco\",\"francesco\",\"fabrizio\",\"fernandez\"]\r\nlista_cose_F = [\"forno\",\"frigo\",\"felpa\",\"fazzoletto\",\"finestra\",\"fischietto\",\"flauto\",\"fisarmonica\"]\r\nlista_stati_F = [\"figi\",\"filippine\",\"finlandia\",\"florida\",\"francia\"]\r\n#______________________________________________________________________________________________________________\r\n\r\nlista_nomi_G = [\"gaia\",\"gabriele\",\"gerardo\",\"gertrude\",\"giacomo\",\"giovanni\",\"gonzalez\"]\r\nlista_cose_G = [\"galeone\",\"gelato\",\"gioco\",\"gesso\",\"gioiello\",\"gomma\",\"guanti\",\"guscio\"]\r\nlista_stati_G = [\"germania\",\"georgia\",\"guinea\",\"giordania\",\"grecia\",\"ghana\",\"giappone\"]\r\n#______________________________________________________________________________________________________________\r\n\r\n#funzioni\r\ndef print_regole():\r\n print(\"-------------------------------------------------------------------------------------------------------\")\r\n print(\"Le regole sono semplici:\")\r\n time.sleep(1.0)\r\n print(\"1. Verrà generata una lettera casuale tra A e G\")\r\n time.sleep(1.0)\r\n print(\"2. Dovrai inserire le parole richieste con la lettera iniziale uguale a quella generata\")\r\n time.sleep(1.0)\r\n print(\"3. 
Ti verrà assegnato un punteggio di: \")\r\n    time.sleep(1.0)\r\n    print(\"0 punti se la parola inserita non è presente all'interno della lista e la lettera iniziale è diversa\")\r\n    time.sleep(1.0)\r\n    print(\"5 punti se la parola inizia con la lettera corretta ma non è nell'elenco\")\r\n    time.sleep(1.0)\r\n    print(\"10 punti se la parola è corretta ed è nella lista del programma\")\r\n    print(\"-------------------------------------------------------------------------------------------------------\")\r\n#______________________________________________________________________________________________________________\r\n\r\n#inizio\r\nprint(\"\")\r\nprint(\"--------NOMI-COSE-STATI--------\")\r\nprint(\"\")\r\nprint(\"Benvenuto nel gioco di nomi cose e stati!\")\r\nprint(\"\") \r\ntime.sleep(1.0)\r\nprint(\"Durante il gioco inserisci 's' per approvare una domanda\")\r\nprint(\"\") \r\ntime.sleep(1.0)\r\nvuoi_iniziare = str(input(\"Desideri iniziare? \"))\r\nprint(\"\")\r\nif (vuoi_iniziare == \"s\"):\r\n    regole = str(input(\"Vuoi conoscere le regole del gioco? \"))\r\n    if(regole == \"s\"):\r\n        print_regole()\r\n    while continuare == \"s\":\r\n        if (x == 6):\r\n            break\r\n        lettera_random:str = random.choice(lettera)\r\n        print(\"\") \r\n        print(\"La lettera generata è \" + str(lettera_random))\r\n        print(\"\") \r\n#______________________________________________________________________________________________________________\r\n\r\n#lettera \"A\"\r\n        if(lettera_random == \"A\"):\r\n            nome = input(\"-inserisci un nome che inizi con la lettera \" + str(lettera_random) + \":\")\r\n            if(nome[0] == \"a\"):\r\n                punteggio = punteggio + 5\r\n                if(nome in lista_nomi_A):\r\n                    punteggio = punteggio + 5\r\n            else:\r\n                punteggio = punteggio\r\n            print(\"\")\r\n            cosa = input(\"-inserisci una cosa che inizi con la lettera \" + str(lettera_random) + \":\")\r\n            if(cosa[0] == \"a\"):\r\n                punteggio = punteggio + 5\r\n                if(cosa in lista_cose_A):\r\n                    punteggio = punteggio + 5\r\n            else:\r\n                punteggio = punteggio \r\n            print(\"\") \r\n            stato = input(\"-inserisci uno stato che inizi con la lettera \" + str(lettera_random) + \":\")\r\n            if(stato[0] == \"a\"):\r\n                punteggio = punteggio + 5\r\n                if(stato in lista_stati_A):\r\n                    punteggio = punteggio + 5\r\n            else:\r\n                punteggio = punteggio\r\n            lettera.remove(lettera_random)\r\n            i = i + 3\r\n            print(\"\") \r\n            continuare = input(\"Desideri continuare? \")\r\n            x = x + 1 \r\n#______________________________________________________________________________________________________________\r\n\r\n#lettera \"B\"\r\n        if(lettera_random == \"B\"):\r\n            nome = input(\"-inserisci un nome che inizi con la lettera \" + str(lettera_random) + \":\")\r\n            if(nome[0] == \"b\"):\r\n                punteggio = punteggio + 5\r\n                if(nome in lista_nomi_B):\r\n                    punteggio = punteggio + 5\r\n            print(\"\") \r\n            cosa = input(\"-inserisci una cosa che inizi con la lettera \" + str(lettera_random) + \":\")\r\n            if(cosa[0] == \"b\"):\r\n                punteggio = punteggio + 5\r\n                if(cosa in lista_cose_B):\r\n                    punteggio = punteggio + 5\r\n            print(\"\") \r\n            stato = input(\"-inserisci uno stato che inizi con la lettera \" + str(lettera_random) + \":\")\r\n            if(stato[0] == \"b\"):\r\n                punteggio = punteggio + 5\r\n                if(stato in lista_stati_B):\r\n                    punteggio = punteggio + 5\r\n            else:\r\n                punteggio = punteggio\r\n            lettera.remove(lettera_random)\r\n            i = i + 3\r\n            print(\"\") \r\n            continuare = input(\"Desideri continuare? 
\")\r\n x = x + 1 \r\n#______________________________________________________________________________________________________________\r\n\r\n#lettera \"C\"\r\n if(lettera_random == \"C\"):\r\n nome = input(\"-inserisci un nome che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(nome[0] == \"c\"):\r\n punteggio = punteggio + 5\r\n if(nome in lista_nomi_C):\r\n punteggio = punteggio + 5\r\n print(\"\") \r\n cosa = input(\"-inserisci una cosa che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(cosa[0] == \"c\"):\r\n punteggio = punteggio + 5\r\n if(cosa in lista_cose_C):\r\n punteggio = punteggio + 5\r\n print(\"\") \r\n stato = input(\"-inserisci uno stato che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(stato[0] == \"c\"):\r\n punteggio = punteggio + 5\r\n if(stato in lista_stati_C):\r\n punteggio = punteggio + 5\r\n else:\r\n punteggio = punteggio\r\n lettera.remove(lettera_random)\r\n i = i + 3\r\n print(\"\") \r\n continuare = input(\"Desideri continuare? \")\r\n x = x + 1 \r\n#______________________________________________________________________________________________________________\r\n\r\n#lettera \"D\"\r\n if(lettera_random == \"D\"):\r\n nome = input(\"-inserisci un nome che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(nome[0] == \"d\"):\r\n punteggio = punteggio + 5\r\n if(nome in lista_nomi_D):\r\n punteggio = punteggio + 5\r\n print(\"\") \r\n cosa = input(\"-inserisci una cosa che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(cosa[0] == \"d\"):\r\n punteggio = punteggio + 5\r\n if(cosa in lista_cose_D):\r\n punteggio = punteggio + 5\r\n print(\"\") \r\n stato = input(\"-inserisci uno stato che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(stato[0] == \"d\"):\r\n punteggio = punteggio + 5\r\n if(stato in lista_stati_D):\r\n punteggio = punteggio + 5\r\n else:\r\n punteggio = punteggio\r\n lettera.remove(lettera_random)\r\n i = i + 3\r\n print(\"\") \r\n continuare = input(\"Desideri continuare? \")\r\n x = x + 1 \r\n#______________________________________________________________________________________________________________\r\n\r\n#lettera \"E\"\r\n if(lettera_random == \"E\"):\r\n nome = input(\"-inserisci un nome che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(nome[0] == \"e\"):\r\n punteggio = punteggio + 5\r\n if(nome in lista_nomi_E):\r\n punteggio = punteggio + 5\r\n print(\"\") \r\n cosa = input(\"-inserisci una cosa che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(cosa[0] == \"e\"):\r\n punteggio = punteggio + 5\r\n if(cosa in lista_cose_E):\r\n punteggio = punteggio + 5\r\n print(\"\") \r\n stato = input(\"-inserisci uno stato che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(stato[0] == \"e\"):\r\n punteggio = punteggio + 5\r\n if(stato in lista_stati_E):\r\n punteggio = punteggio + 5\r\n else:\r\n punteggio = punteggio\r\n lettera.remove(lettera_random)\r\n i = i + 3\r\n print(\"\") \r\n continuare = input(\"Desideri continuare? 
\")\r\n x = x + 1 \r\n#______________________________________________________________________________________________________________\r\n\r\n#lettera \"F\"\r\n if(lettera_random == \"F\"):\r\n nome = input(\"-inserisci un nome che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(nome[0] == \"f\"):\r\n punteggio = punteggio + 5\r\n if(nome in lista_nomi_F):\r\n punteggio = punteggio + 5\r\n print(\"\") \r\n cosa = input(\"-inserisci una cosa che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(cosa[0] == \"f\"):\r\n punteggio = punteggio + 5\r\n if(cosa in lista_cose_F):\r\n punteggio = punteggio + 5\r\n print(\"\") \r\n stato = input(\"-inserisci uno stato che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(stato[0] == \"f\"):\r\n punteggio = punteggio + 5\r\n if(stato in lista_stati_F):\r\n punteggio = punteggio + 5\r\n else:\r\n punteggio = punteggio\r\n lettera.remove(lettera_random)\r\n i = i + 3\r\n print(\"\") \r\n continuare = input(\"Desideri continuare? \")\r\n x = x + 1\r\n#______________________________________________________________________________________________________________\r\n\r\n#lettera \"G\"\r\n if(lettera_random == \"G\"):\r\n nome = input(\"-inserisci un nome che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(nome[0] == \"g\"):\r\n punteggio = punteggio + 5\r\n if(nome in lista_nomi_G):\r\n punteggio = punteggio + 5\r\n print(\"\")\r\n cosa = input(\"-inserisci una cosa che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(cosa[0] == \"g\"):\r\n punteggio = punteggio + 5\r\n if(cosa in lista_cose_G):\r\n punteggio = punteggio + 5\r\n print(\"\") \r\n stato = input(\"-inserisci uno stato che inizi con la lettera \" + str(lettera_random) + \":\")\r\n if(stato[0] == \"g\"):\r\n punteggio = punteggio + 5\r\n if(stato in lista_stati_G):\r\n punteggio = punteggio + 5\r\n else:\r\n punteggio = punteggio\r\n lettera.remove(lettera_random)\r\n i = i + 3\r\n print(\"\") \r\n continuare = input(\"Desideri continuare? \")\r\n x = x + 1\r\n#______________________________________________________________________________________________________________ \r\n\r\n#stampa finale\r\n print(\"-------------------------------------------------------------------------------------------------------\")\r\n print(\"IL TUO PUNTEGGIO: \" + str(punteggio))\r\n percentuale_punteggio = punteggio / (i * 10) * 100\r\n if(percentuale_punteggio == 100):\r\n print(\"Hai ottenuto un punteggio perfetto, complimenti!\")\r\n if(percentuale_punteggio < 100 and percentuale_punteggio >= 75):\r\n print(\"Bravo, non è il massimo ma c'eri vicino!\")\r\n if(percentuale_punteggio < 75 and percentuale_punteggio >= 50):\r\n print(\"Non male ma puoi fare di meglio!\")\r\n if(percentuale_punteggio < 50 and percentuale_punteggio >= 25 ):\r\n print(\"Non è andata malissimo ma cerca di superare il 50% dei punti totali!\")\r\n if(percentuale_punteggio < 25 ):\r\n print(\"Che delusione! 
Vedi di riprovare!\")\r\n print(\"\")","repo_name":"chridemo/corso_base_programmazione","sub_path":"nomi-cose-stati.py","file_name":"nomi-cose-stati.py","file_ext":"py","file_size_in_byte":14903,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15082587308","text":"from resources.Data import Dicts\nfrom datetime import datetime\n# from reminder.logger_reminder import *\n\n\nGROUP_1 = Dicts.GROUP_1\nGROUP_2 = Dicts.GROUP_2\nGROUP_3 = Dicts.GROUP_3\n\ndef choose_day():\n current_time = datetime.now().strftime(\"%H\")\n current_day = datetime.today().weekday()\n if current_time == '00':\n if current_day != 0:\n return current_day - 1\n else:\n return 6\n else:\n return current_day\n\ndef get_time_zone(time=None):\n if time is None:\n time = int(datetime.now().strftime(\"%H\"))\n\n if time >= 21 or time == 0:\n return '21-1'\n else:\n ranges = map(lambda x: x.split('-'), Dicts.TYPE_1.keys())\n time_zone = '-'.join(list(filter(lambda x: time in range(int(x[0]), int(x[1])), ranges))[0])\n return time_zone\n\n\ndef inform(group: int):\n current_time = datetime.now().strftime(\"%H\")\n right_border = get_time_zone().split('-')[1]\n difference = int(right_border) - int(current_time)\n next_condition = get_next_condition(group)\n # send_log(f\"Difference: {difference}, right boarder: {right_border}, current_time: {current_time}, next condition: {next_condition}\")\n if next_condition in (\"Можливе Відключення\", \"Немає Енергії\") and difference == 1:\n return True\n else:\n return False\n\n\ndef get_next_condition(group: int) -> str:\n time = int(datetime.now().strftime(\"%H\"))\n day = choose_day()\n if time >= 21 or time == 0:\n if day == 6:\n if group == 1:\n return GROUP_1[Dicts.WEEK_DAY[0]][get_time_zone(1)]\n elif group == 2:\n return GROUP_2[Dicts.WEEK_DAY[0]][get_time_zone(1)]\n else:\n return GROUP_3[Dicts.WEEK_DAY[0]][get_time_zone(1)]\n else:\n if group == 1:\n return GROUP_1[Dicts.WEEK_DAY[day + 1]][get_time_zone(1)]\n elif group == 2:\n return GROUP_2[Dicts.WEEK_DAY[day + 1]][get_time_zone(1)]\n else:\n return GROUP_3[Dicts.WEEK_DAY[day + 1]][get_time_zone(1)]\n else:\n if group == 1:\n return GROUP_1[Dicts.WEEK_DAY[day]][get_time_zone(time + 4)]\n elif group == 2:\n return GROUP_2[Dicts.WEEK_DAY[day]][get_time_zone(time + 4)]\n else:\n return GROUP_3[Dicts.WEEK_DAY[day]][get_time_zone(time + 4)]\n\n\n\ndef get_condition(group: int) -> str:\n time = int(datetime.now().strftime(\"%H\"))\n day = choose_day()\n time_zone = get_time_zone(time)\n if group == 1:\n return GROUP_1[Dicts.WEEK_DAY[day]][time_zone]\n elif group == 2:\n return GROUP_2[Dicts.WEEK_DAY[day]][time_zone]\n else:\n return GROUP_3[Dicts.WEEK_DAY[day]][time_zone]\n\n\n\ndef get_day_sсhedule(group: int) -> dict:\n day = choose_day()\n if group == 1:\n return GROUP_1[Dicts.WEEK_DAY[day]]\n elif group == 2:\n return GROUP_2[Dicts.WEEK_DAY[day]]\n else:\n return GROUP_3[Dicts.WEEK_DAY[day]]\n\n\n\n\n","repo_name":"rabatyaga/Black-Out-Bot","sub_path":"resources/electricity.py","file_name":"electricity.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10759464270","text":"from shutil import copy2\nimport constants as const\nfrom BOGClient import BOGClient\n\ndef run_client(token: str):\n client = BOGClient()\n client.run(token)\n\n\nif __name__ == '__main__':\n if const.TOKEN:\n run_client(const.TOKEN)\n else:\n print(\"Your environment variables were not 
set!\")\n print(\"Please modify your .env file\")\n copy2(const.TEMP_ENV_PATH, const.ENV_PATH)\n exit(1)\n","repo_name":"MetriC-DT/BOG","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25238631190","text":"\nimport re\n\nimport spacy\n\nfrom spacy.tokenizer import Tokenizer\nfrom spacy.attrs import ORTH, NORM\nfrom spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER, CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS\nimport logging\nfrom tqdm import tqdm\nfrom spacy.tokenizer import Tokenizer\nfrom spacy.lang.en import English\n\n\nLANG = 'en_core_web_sm'\n\nDISABLE = [\"tagger\", \"ner\", \"lemmatizer\"]\n\n#INFIXES = ['[~:*\\(\\)\\/-]']\n#INFIXES = ['[:\\(\\)\\/-]']\nINFIXES = []\n\n#PREFIXES = [\"\\[\\*\\*\", \"~~\\*\\*\\*\", \"<<\"]\n#SUFFIXES = [\"\\*\\*\\]\", \"\\*\\*\\*~~\", \">>\", '([a-zA-Z]{1,2}\\.){2,4}']\n\nPREFIXES = []\n#SUFFIXES = ['([a-zA-Z]{1,2}\\.){2,4}']\nSUFFIXES = []\n\n#SPECIAL_CASES = [\n# (\"[**\", [{ORTH: \"[**\"}]),\n# (\"**]\", [{ORTH: \"**]\"}]),\n# (\"~~***\", [{ORTH: \"~~***\"}]),\n# (\"***~~\", [{ORTH: \"***~~\"}]),\n#]\n\nSPECIAL_CASES = []\n\n\n\nASCII_MAP = {}\n# Hyphen\n# ASCII_MAP[173] = 45\nASCII_MAP['\\xad'] = '-'\n\n\n'''\nThis paper provides some spacy customization suggestions:\nhttps://medinform.jmir.org/2020/7/e18417/pdf\nSee the appendix\n\nName Description Additional affix rules Text Tokens\nPrefixes A regex-based function for\nidentifying token prefixes\n'[**',\n'**]',\n(\\d+/\\d+ | \\d+\\,\\d+ | \\d*.\\d+ | \\d+. | \\d+),\n'-', '.', 'O2', 'o2'\n[**2012 [**, 2012\n2.3units 2.3, units\nInfixes\nA regex-based function for\nidentifying the infixes in a\ntoken\n'(', '+', '->', '/', '-', ':' 50+units\nlisin/hctz\n50, +, units\nlisin, /, hctz\nSuffixes A regex-based function for\nidentifying token suffixes\n'[**', '**]', 'mg', 'prn', 'qhs', 'hrs', 'O2',\n'o2', '(s)', '-', ':', 'NC', 'SQ', 'PRBC',\n'QAM', 'QPM', 'PM', 'nc', 'MWF', 'QD',\n'RBCs'\n20mg\n8hrs\n20, mg\n8, hrs\n\n\n\n'''\n\n\ndef line_break_boundary(doc):\n for token in doc[:-1]:\n if token.text.isspace() and ('\\n' in token.text):\n doc[token.i+1].is_sent_start = True\n return doc\n\ndef get_tokenizer():\n nlp = spacy.load(LANG, disable=DISABLE)\n return nlp\n\n\ndef get_tokenizer_OLD( \\\n lang = LANG,\n disable = DISABLE,\n #prefixes_custom = PREFIXES,\n #infixes_custom =INFIXES,\n #suffixes_custom = SUFFIXES,\n #special_cases = None,\n linebreak_bound = False):\n\n '''\n http://www.longest.io/2018/01/27/spacy-custom-tokenization.html\n '''\n\n nlp = spacy.load(lang, disable=disable)\n\n # Incorporate custom prefixes\n #prefixes_default = list(nlp.Defaults.prefixes)\n #prefixes_all = tuple(prefixes_default + prefixes_custom)\n #prefixes_re = spacy.util.compile_prefix_regex(prefixes_all)\n\n # Incorporate custom infixes\n #infixes_default = list(nlp.Defaults.infixes)\n #infixes_all = tuple(infixes_default + infixes_custom)\n #infixes_re = spacy.util.compile_infix_regex(infixes_all)\n\n # Incorporate custom suffixes\n #suffixes_default = list(nlp.Defaults.suffixes)\n #suffixes_all = tuple(suffixes_default + suffixes_custom)\n #suffix_re = spacy.util.compile_suffix_regex(suffixes_all)\n\n # Create tokenizer\n tokenizer = Tokenizer(nlp.vocab,\n nlp.Defaults.tokenizer_exceptions,\n #prefix_search = prefixes_re.search,\n #infix_finditer = infixes_re.finditer,\n #suffix_search = suffix_re.search,\n token_match=None)\n\n # Incorporate special cases\n 
if special_cases is not None:\n for tok, case in special_cases:\n tokenizer.add_special_case(tok, case)\n\n\n nlp.tokenizer = tokenizer\n\n if linebreak_bound:\n nlp.add_pipe(line_break_boundary, first=True)\n return nlp\n\n\ndef is_ascii(s):\n\n for c in s:\n if ord(c) > 128:\n logging.warn('{} {} is not ascii'.format(c, ord(c)))\n return True\n\ndef map2ascii(text, map=ASCII_MAP):\n\n for original, new in map.items():\n text = text.replace(original, new)\n\n return text\n\ndef normalize_linebreaks(text):\n text = re.sub('\\r\\n', '\\n', text)\n return text\n\n\ndef has_windows_linebreaks(text):\n return '\\r\\n' in text\n\n\n#def char_cleanup(X, map=CHAR_MAP):\n # Y = []\n # for x in X:\n # x = ord(x)\n # y = chr(map.get(x, x))\n # Y.append(y)\n # Y = ''.join(Y)\n # return Y\n\ndef simple_tokenization(A, punct=set('''\".:,;/()-\\'''')):\n\n B = []\n for a in A:\n if a in punct:\n B.append(' ')\n B.append(a)\n B.append(' ')\n else:\n B.append(a)\n\n B = ''.join(B).split()\n\n assert ''.join(''.join(B).split()) == ''.join(A.split())\n\n return B\n\ndef get_char_indices(text, tokens):\n\n text_orig = text\n\n i = 0\n indices = []\n for token in tokens:\n\n # look for matches\n m = re.search(re.escape(token), text)\n\n # make sure match found\n assert m\n\n # store indices\n start = m.start(0) + i\n end = m.end(0) + i\n assert isinstance(start, int)\n assert isinstance(end, int)\n assert end > start\n indices.append((start, end))\n\n # update counter an text\n i = end\n text = text[m.end(0):]\n\n assert len(indices) == len(tokens), f'{len(indices)} vs {len(tokens)}'\n\n for j, (start, end) in enumerate(indices):\n assert tokens[j] == text_orig[start:end], f'{repr(tokens[j])} vs {repr(text[start:end])}'\n\n return indices\n\ndef rm_extra_linebreaks(text):\n\n # get original character count out text without white\n char_count = len(text)\n wo_ws = ''.join(text.split())\n\n # find all redundant linebreaks\n matches = list(re.finditer('\\n[ \\n]+', text))\n\n # iterate over matches\n for m in matches:\n\n orig = m.group(0)\n\n start = m.start()\n end = m.end()\n n = end - start\n new = ' '*(n-1) + '\\n'\n assert len(new) == len(orig)\n\n text = text[:start] + new + text[end:]\n\n assert char_count == len(text)\n assert wo_ws == ''.join(text.split())\n\n return text\n\ndef rm_footer(text, footer):\n\n n = len(footer)\n assert text[-n:] == footer, f\"{text[-n:]} vs {footer}\"\n\n text = text[:-n]\n\n return text\n\n\n\ndef tokenize_document(text, tokenizer, keep_ws=False, max_sent_count=None, pad_start=False, pad_end=False):\n\n spacy_doc = tokenizer(text)\n sents = list(spacy_doc.sents)\n\n # Filter sentences\n sents = [sent for sent in sents if keep_ws or (not sent.text.isspace())]\n\n sentences = []\n sent_offsets = []\n token_offsets = []\n for sent in sents:\n\n sent_offsets.append((sent.start_char, sent.end_char))\n sentences.append(sent.text)\n token_offsets.append([])\n\n # filter tokens\n tokens = [token for token in sent if keep_ws or (not token.text.isspace())]\n\n if pad_start:\n token_offsets[-1].append((-1, -1))\n\n for token in tokens:\n token_start = token.idx\n token_end = token_start + len(token.text)\n token_offsets[-1].append((token_start, token_end))\n\n if pad_end:\n token_offsets[-1].append((-1, -1))\n\n for sent in sentences:\n if not keep_ws:\n assert len(sent) > 0\n\n if max_sent_count is not None:\n\n sentences = sentences + ['']*max_sent_count\n sent_offsets = sent_offsets + [(0,0)]*max_sent_count\n token_offsets = token_offsets + [[(0,0)]]*max_sent_count\n\n sentences 
= sentences[:max_sent_count]\n sent_offsets = sent_offsets[:max_sent_count]\n token_offsets = token_offsets[:max_sent_count]\n\n\n assert len(sentences) == len(sent_offsets)\n assert len(sentences) == len(token_offsets)\n\n\n return (sentences, sent_offsets, token_offsets)\n\n\ndef get_tokens(text, tokenizer, keep_ws=False):\n\n spacy_doc = tokenizer(text)\n sents = list(spacy_doc.sents)\n\n # Filter sentences\n sents = [s for s in sents if keep_ws or (not s.text.isspace())]\n\n tokens = []\n for sent in sents:\n\n\n # filter tokens\n tokens.append([t for t in sent if keep_ws or (not t.text.isspace())])\n\n return tokens\n\n#def tokenize_corpus(documents, max_sent_count=None,\\\n# linebreak_bound=True, keep_ws=False,\n# pad_start=False, pad_end=False):\ndef tokenize_corpus(documents, max_sent_count=None,\\\n keep_ws=False, pad_start=False, pad_end=False):\n\n\n tokenizer = get_tokenizer()\n\n logging.info(\"Tokenizing documents...\")\n\n sentences = []\n sent_offsets = []\n token_offsets = []\n pbar = tqdm(total=len(documents))\n for doc in documents:\n sent, sent_off, tok_off = tokenize_document( \\\n text = doc,\n tokenizer = tokenizer,\n keep_ws = keep_ws,\n max_sent_count = max_sent_count,\n pad_start = pad_start,\n pad_end = pad_end)\n\n sentences.append(sent)\n sent_offsets.append(sent_off)\n token_offsets.append(tok_off)\n\n pbar.update()\n pbar.close()\n return (sentences, sent_offsets, token_offsets)\n\n\n\n\ndef remove_white_space_at_ends(text, start, end):\n\n # leading white space\n n = len(text)\n text = text.lstrip()\n start += n - len(text)\n\n # trailing white space\n n = len(text)\n text = text.rstrip()\n end -= n - len(text)\n\n return (text, start, end)\n\n\n\ndef get_context(spacy_doc, start, end, context_len=1):\n\n target = None\n\n sentences = list(spacy_doc.sents)\n sentences = [sent for sent in sentences if (not sent.text.isspace())]\n\n for i, sent in enumerate(sentences):\n\n sent_start = sent.start_char\n sent_end = sent.end_char\n\n start_match = (start >= sent_start) and (start < sent_end)\n end_match = (end > sent_start) and (end <= sent_end)\n\n if start_match:\n target = i\n if not end_match:\n logging.warn(f\"get_context: start and end not an same sentence\")\n break\n\n assert target is not None\n\n target_end = target + 1\n target_start = target_end - context_len\n\n context = sentences[target_start:target_end]\n context = [sentence.text for sentence in context]\n context = \" \".join(context)\n\n return context\n","repo_name":"Lybarger/sdoh_extraction","sub_path":"corpus/tokenization.py","file_name":"tokenization.py","file_ext":"py","file_size_in_byte":10165,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"37751892494","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('libreosteoweb', '0021_therapeutsettings_siret'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='therapeutsettings',\n name='invoice_footer',\n field=models.TextField(null=True, verbose_name='Invoice footer', blank=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"libreosteo/LibreOsteo","sub_path":"libreosteoweb/migrations/0022_therapeutsettings_invoice_footer.py","file_name":"0022_therapeutsettings_invoice_footer.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} 
+{"seq_id":"71544406024","text":"from genie import Genie\nimport uuid\nimport pandas as pd\n\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.vectorstores import Chroma\nfrom langchain.document_loaders import DataFrameLoader\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\n\nimport chromadb\nfrom chromadb.config import Settings\nfrom datetime import datetime\nfrom pytz import timezone\n\n\nclass GenieMaster:\n _EMBEDDING_FUNCTION = OpenAIEmbeddings()\n\n def __init__(self, db_path=\"./chroma_db\", collection_name=\"langchain\"):\n tz = timezone(\"US/Eastern\")\n self.init_time = datetime.now(tz)\n print(f\"Genie Master initialized at: {self.init_time}\")\n\n self.db_path = db_path\n\n # init chromadb vector storage\n # https://docs.trychroma.com/usage-guide\n client = chromadb.PersistentClient(\n path=db_path, settings=Settings(anonymized_telemetry=False)\n )\n self.vectorstore = Chroma(\n client=client,\n collection_name=collection_name,\n embedding_function=self._EMBEDDING_FUNCTION,\n )\n\n def _document_count(self):\n return self.get_collection().count()\n\n def model_is_ready(self):\n return self._document_count() > 0\n\n def get_collection(self):\n return self.vectorstore._collection\n\n def transform_and_add_data(self, df: pd.DataFrame, page_content_column):\n \"\"\"Transforms the data in the inputted database into vector and stored\n in a vector db. Note: If data being added is already in the database (word for word),\n then this entry will be skipped.\n \"\"\"\n if not \"name\" in df.columns:\n raise Exception(\"Dataframe must have a name column\")\n if df[df.isna().any(axis=1)].shape[0] > 0:\n raise Exception(\"Dataframe must not have any NA values\")\n\n # 1. Load in text as Documents\n loader = DataFrameLoader(df, page_content_column)\n data = loader.load()\n\n print(\"Number of documents loaded:\", len(data))\n\n # 2. Transform\n # split documents into smaller chunks\n text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n docs = text_splitter.split_documents(data)\n\n # create a list of unique ids for each document based on the content\n ids = [str(uuid.uuid5(uuid.NAMESPACE_DNS, doc.page_content)) for doc in docs]\n\n print(f\"Documents split into {len(docs)} chunks\")\n\n # 3. 
Embed and Store\n print(\"Begin embedding and storage\")\n\n # vectorstore collection\n collection = self.get_collection()\n for id, langchain_doc in zip(ids, docs):\n # ids are unique in chromadb, so duplicate ids will be skipped -> ensures no duplicating document\n content = langchain_doc.page_content\n metadata = langchain_doc.metadata\n\n collection.add(ids=id, metadatas=metadata, documents=content)\n\n print(\"Documents successfully embedded and stored into vectorbase\")\n print(\"Number of documents:\", self._document_count())\n\n return True\n\n def get_genie(\n self,\n name: str,\n model_name: str = \"gpt-3.5-turbo\",\n use_parser=True,\n ) -> Genie:\n if not self.model_is_ready():\n raise Exception(\"Model is not ready: please add data first\")\n\n return Genie(\n name,\n vectorstore=self.vectorstore,\n model_name=model_name,\n use_parser=use_parser,\n )\n\n def get_genies(\n self, names: list[str], model_name: str = \"gpt-3.5-turbo\", use_parser=True\n ) -> dict[str, Genie]:\n genies = {\n name: self.get_genie(\n name=name, model_name=model_name, use_parser=use_parser\n )\n for name in names\n }\n return genies\n\n\nif __name__ == \"__main__\":\n import re\n\n def preprocess_quote(quote):\n # Replace missing spaces with space\n preprocessed_quote = re.sub(r\"(?<=[a-z])(?=[A-Z])\", \" \", quote)\n\n # ...\n # add more if necessary\n\n return preprocessed_quote\n\n df = pd.read_excel(\"data/qadata.xlsx\")\n df[[\"name\", \"party\"]] = df.username.str.split(\" - \", expand=True).apply(\n lambda x: x.str.strip()\n )\n df = df.drop([\"username\"], axis=1)\n\n # taking only a portion of the data for now\n category_list = [\n \"Abortion, Pro-Life & Genetic Engineering\",\n \"Crime, Police & Imprisonment\",\n \"Environment & Climate Change\",\n \"Gun & Property Rights\",\n \"Immigration, Border Security, Terrorism & Homeland Security\",\n \"Jobs, Economy, Trade, Business, Industry & Agriculture\",\n \"Education & Schools\",\n ]\n df = df.loc[df.parent_question.isin(category_list)]\n df[\"answer\"] = df[\"answer\"].apply(preprocess_quote)\n\n # removing None values in \"party\"\n df[\"party\"] = df[\"party\"].apply(lambda p: p if p else \"Other\")\n\n handler = GenieMaster()\n # handler.transform_and_add_data(df, page_content_column=\"answer\")\n genie = handler.get_genie(\"Joe Biden\")\n print(genie.ask(\"Do you believe labor unions help the economy?\"))\n","repo_name":"CloudyLeopard/VoteGenie","sub_path":"genie_master.py","file_name":"genie_master.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42864544235","text":"T = int(input())\r\n\r\nfor _ in range(T):\r\n h, _, n = map(int, input().split())\r\n if n % h == 0:\r\n c = h\r\n r = n // h\r\n else:\r\n c = n % h\r\n r = n // h + 1\r\n print(100*c + r)","repo_name":"iblug/Baekjoon","sub_path":"백준/Bronze/10250. 
ACM 호텔/ACM 호텔.py","file_name":"ACM 호텔.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14797665278","text":"import os\r\nfrom proc.pcap import Pcap\r\nimport sys\r\nfrom multiprocessing.pool import Pool as ThreadPool\r\nimport csv\r\n\r\n\r\ndef run_compare(file_name):\r\n pcap = Pcap()\r\n packets = pcap.parse(file_name)\r\n out_file_name = \"{}.ns\".format(file_name)\r\n with open(out_file_name, \"w+\") as output:\r\n for packet in packets:\r\n info = packet.raw_data.hex() + '\\n'\r\n output.write(info)\r\n\r\n val = os.popen(\"./ns_compress {}\".format(file_name))\r\n result = val.read()\r\n result = result.split(\"\\n\")\r\n result = [value.split(\":\")[1][1:] for value in result[:-1]]\r\n result = [os.path.split(file_name)[-1]] + result\r\n os.remove(out_file_name)\r\n return result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) != 2:\r\n print(\"There should be one and only one directory name in the given args.\")\r\n path = os.path.abspath(sys.argv[1])\r\n file_names = os.listdir(path)\r\n files = []\r\n for file_name in file_names:\r\n if file_name.endswith(\".pcap\"):\r\n files.append(os.path.join(path, file_name))\r\n pool = ThreadPool(12)\r\n results = pool.map(run_compare, files)\r\n pool.close()\r\n pool.join()\r\n\r\n with open(\"result.csv\", 'w+') as out:\r\n csv_writer = csv.writer(out)\r\n csv_writer.writerow([\"file\", \"ns_gzip c_ratio\", \"ns_gzip c_t\", \"ns_zstd c_ratio\", \"ns_zstd c_t\", \"gzip c_ratio\", \"gzip c_t\", \"zstd c_ratio\", \"zstd c_t\"])\r\n csv_writer.writerows(results)\r\n","repo_name":"andylin-hao/pcap_compress","sub_path":"compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"38359794837","text":"# -*- coding: utf-8 -*-\n# @Author : Ryan\n# @Time : 2021/6/29 08:41\n# @Software : PyCharm\n# @Description : 168. Excel表列名称\n\nfrom collections import *\n\n\nclass Solution:\n def convertToTitle(self, columnNumber: int) -> str:\n result = []\n while columnNumber > 0:\n a0 = (columnNumber - 1) % 26 + 1\n result.append(chr(a0 - 1 + ord('A')))\n columnNumber = (columnNumber - a0) // 26\n return \"\".join(result[::-1])\n\n\nso = Solution()\n# print(so.convertToTitle(2147483647))\nprint(so.convertToTitle(701))\n","repo_name":"EngineerFan/py-demo","sub_path":"src/algorithm/168. Excel表列名称.py","file_name":"168. 
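The ACM-hotel record above packs the whole assignment rule into two arithmetic cases: guests fill a vertical column of h floors before moving one room away from the elevator, so the floor is a remainder and the corridor position a quotient. A standalone restatement, checked against the problem's published samples:

def room_number(h: int, n: int) -> int:
    # n-th guest in an h-floor hotel: floor = remainder (top floor on
    # an exact multiple), corridor position = 1-based quotient.
    if n % h == 0:
        floor, pos = h, n // h
    else:
        floor, pos = n % h, n // h + 1
    return 100 * floor + pos

assert room_number(6, 10) == 402     # sample from Baekjoon 10250
assert room_number(30, 72) == 1203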
Excel表列名称.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8704904047","text":"from ctypes import *\nfrom ctypes.util import find_library\nfrom ctypes import sizeof as c_sizeof\nimport errno\nimport time\n\nlibc = CDLL(find_library('c'), use_errno=True)\n\n# hardcoded macosx constants!\nCTL_NET = 4\nPF_ROUTE = 17\nNET_RT_IFLIST2 = 6\n\nRTM_IFINFO2 = 0x12\nIFF_LOOPBACK = 0x8\nAF_LINK = 18\n\nclass timeval32(Structure):\n _fields_ = [('tv_sec', c_int32),\n ('tv_usec', c_int32)]\n\nclass if_data64(Structure):\n _pack_ = 4\n _fields_ = [('ifi_type', c_ubyte),\n ('ifi_typelen', c_ubyte),\n ('ifi_physical', c_ubyte),\n ('ifi_addrlen', c_ubyte),\n ('ifi_hdrlen', c_ubyte),\n ('ifi_recvquota', c_ubyte),\n ('ifi_xmitquota', c_ubyte),\n ('ifi_unused1', c_ubyte),\n ('ifi_mtu', c_uint32),\n ('ifi_metric', c_uint32),\n ('ifi_baudrate', c_uint64),\n ('ifi_ipackets', c_uint64),\n ('ifi_ierrors', c_uint64),\n ('ifi_opackets', c_uint64),\n ('ifi_oerrors', c_uint64),\n ('ifi_collisions', c_uint64),\n ('ifi_ibytes', c_uint64),\n ('ifi_obytes', c_uint64),\n ('ifi_imcasts', c_uint64),\n ('ifi_omcasts', c_uint64),\n ('ifi_iqdrops', c_uint64),\n ('ifi_noproto', c_uint64),\n ('ifi_recvtiming', c_uint32),\n ('ifi_xmittiming', c_uint32),\n ('ifi_lastchange', timeval32)]\n\nclass if_msghdr2(Structure):\n _fields_ = [('ifm_msglen', c_ushort),\n ('ifm_version', c_ubyte),\n ('ifm_type', c_ubyte),\n ('ifm_addrs', c_int),\n ('ifm_flags', c_int),\n ('ifm_index', c_ushort),\n ('ifm_snd_len', c_int),\n ('ifm_snd_maxlen', c_int),\n ('ifm_snd_drops', c_int),\n ('ifm_timer', c_int),\n ('ifm_data', if_data64)]\n\nclass sockaddr_dl(Structure):\n _fields_ = [('sdl_len', c_ubyte),\n ('sdl_family', c_ubyte),\n ('sdl_index', c_ushort),\n ('sdl_type', c_ubyte),\n ('sdl_nlen', c_ubyte),\n ('sdl_alen', c_ubyte),\n ('sdl_slen', c_ubyte),\n ('sdl_data', c_char * 12)] # for now\n\nMIB_TYPE = c_int * 6\nmib = MIB_TYPE(CTL_NET, PF_ROUTE, 0, 0, NET_RT_IFLIST2, 0)\n\ndef query_if(ifname):\n ifname = ifname.encode('ascii')\n sysctl_buf_len = c_uint(0)\n\n rval = libc.sysctl(mib, 6, None, byref(sysctl_buf_len), None, 0)\n if rval != 0:\n raise Exception(errno.errorcode[get_errno()])\n\n sysctl_buf = create_string_buffer(sysctl_buf_len.value)\n rval = libc.sysctl(mib, 6, sysctl_buf, byref(sysctl_buf_len), None, 0)\n if rval != 0:\n raise Exception(errno.errorcode[get_errno()])\n\n# walk the structure. 
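Both Excel-column records in this batch implement bijective base-26 (digits A..Z stand for 1..26, with no zero digit), which is why the loop subtracts 1 before each modulo. The inverse mapping gives a cheap round-trip check:

def title_to_number(title: str) -> int:
    # Inverse of convertToTitle: fold digits A..Z as values 1..26.
    n = 0
    for ch in title:
        n = n * 26 + (ord(ch) - ord('A') + 1)
    return n

assert title_to_number("A") == 1
assert title_to_number("Z") == 26
assert title_to_number("AA") == 27
assert title_to_number("ZY") == 701   # the record's own test value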
you need to know the length from ifm_msglen\n idx = addressof(sysctl_buf)\n end = idx + sysctl_buf_len.value\n while idx < end:\n batch_off = idx - addressof(sysctl_buf)\n ifmsg = cast(c_void_p(idx), POINTER(if_msghdr2))\n if ifmsg.contents.ifm_type != RTM_IFINFO2:\n idx += ifmsg.contents.ifm_msglen\n continue\n if ifmsg.contents.ifm_flags & IFF_LOOPBACK:\n idx += ifmsg.contents.ifm_msglen\n continue\n # 12 bytes to compensate for 32 bit alignment\n sdl = cast(c_void_p(idx + c_sizeof(if_msghdr2)), POINTER(sockaddr_dl))\n if sdl.contents.sdl_family != AF_LINK:\n idx += ifmsg.contents.ifm_msglen\n continue\n\n if ifname != sdl.contents.sdl_data[0:sdl.contents.sdl_nlen]:\n idx += ifmsg.contents.ifm_msglen\n continue\n return ifmsg.contents.ifm_data.ifi_ibytes, ifmsg.contents.ifm_data.ifi_obytes\n #idx += ifmsg.contents.ifm_msglen\n raise Exception('ifname {0} not found'.format(ifname))\n\ndef bw_rate(ifname, delay=1):\n first_ibytes, first_obytes = query_if(ifname)\n time.sleep(delay)\n second_ibytes, second_obytes = query_if(ifname)\n\n ikb_s = ((second_ibytes - first_ibytes) / (1024. * delay))\n okb_s = ((second_obytes - first_obytes) / (1024. * delay))\n return ikb_s, okb_s\n\nif __name__ == '__main__':\n while True:\n print(bw_rate('en1'))\n","repo_name":"dgilman/berimbau","sub_path":"ifbw.py","file_name":"ifbw.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22407629551","text":"import ast\nfrom utils.fileutil import read_file_to_string_list\nfrom utils.list_util import split_lists_by_whitespace\nimport math\nfrom functools import cmp_to_key\n\n\ndef part1():\n raw_input = read_file_to_string_list(\"adventofcode/day13/input.txt\")\n packet_pairs = split_lists_by_whitespace(raw_input)\n\n valid_pairs = []\n for idx, pairs in enumerate(packet_pairs):\n packet_1 = ast.literal_eval(pairs[0])\n packet_2 = ast.literal_eval(pairs[1])\n if validate_packets(packet_1, packet_2):\n valid_pairs.append(idx + 1)\n print(sum(valid_pairs))\n\n\ndef part2():\n raw_input = read_file_to_string_list(\"adventofcode/day13/input.txt\")\n raw_input.append('[[6]]')\n raw_input.append('[[2]]')\n cleaned_input = [x for x in raw_input if (x != \"\")]\n cleaned_input.sort(key=cmp_to_key(lambda a, b: result_to_compare(validate_packets(ast.literal_eval(a),ast.literal_eval(b)))))\n results = []\n for idx, vals in enumerate(cleaned_input):\n if vals == '[[6]]' or vals == '[[2]]':\n results.append(idx+1)\n print(math.prod(results))\n\ndef result_to_compare(val:bool|None):\n if val is None:\n return 0\n elif val:\n return -1\n else:\n return 1\n\n\ndef validate_packets(p1, p2) -> bool|None:\n idx = 0\n while idx < len(p1) and idx < len(p2):\n if type(p1[idx]) is int and type(p2[idx]) is int:\n if p1[idx] == p2[idx]:\n idx += 1\n else:\n return p1[idx] < p2[idx]\n elif type(p1[idx]) is list and type(p2[idx]) is list:\n result = validate_packets(p1[idx], p2[idx])\n if result is not None:\n return result\n else:\n idx += 1\n else:\n # Types are not the same, make the non-list a list\n if type(p1[idx]) is int:\n result = validate_packets([p1[idx]], p2[idx])\n if result is not None:\n return result\n else:\n idx += 1\n else:\n result = validate_packets(p1[idx], [p2[idx]])\n if result is not None:\n return result\n else:\n idx += 1\n if len(p1) == len(p2):\n return None\n if idx == len(p1):\n return True\n else:\n return False\n\n\nif __name__ == \"__main__\":\n part1()\n 
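The ifbw record above walks the raw NET_RT_IFLIST2 sysctl buffer by hand, which is macOS-specific. For comparison, here is a sketch of the same two-sample rate computation on top of psutil, a third-party package the record does not use:

import time
import psutil

def bw_rate(ifname: str, delay: float = 1.0):
    # Two counter snapshots `delay` seconds apart -> KiB/s in and out.
    first = psutil.net_io_counters(pernic=True)[ifname]
    time.sleep(delay)
    second = psutil.net_io_counters(pernic=True)[ifname]
    ikb_s = (second.bytes_recv - first.bytes_recv) / (1024.0 * delay)
    okb_s = (second.bytes_sent - first.bytes_sent) / (1024.0 * delay)
    return ikb_s, okb_s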
part2()\n","repo_name":"trekie86/AdventOfCode2022","sub_path":"adventofcode/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3666746563","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# machine.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: mmarinel +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2022/05/20 11:11:03 by earendil #+# #+# #\n# Updated: 2022/05/21 16:35:16 by mmarinel ### ########.fr #\n# #\n# **************************************************************************** #\n\nBEV_SERVED_BEFORE_FAILURE = 10\n\nimport os\nimport random\nfrom time import sleep\nfrom typing import Type, TypeVar\nfrom beverages import *\n\nclass\tCoffeeMachine:\n\tdef\t__init__(self) -> None:\n\t\trandom.seed()\n\t\tself.__drinks_served = 0;\n\t\n\tclass\tEmptyCup(HotBeverage):\n\t\tname = \"empty cup\"\n\t\tprice = 0.90\n\n\t\tdef\tdescription(self):\n\t\t\treturn (\"An empty cup?! Gimme my money back!\")\n\n\tclass\tBrokenMachineException(Exception):\n\t\tdef\t__init__(self, *args: object) -> None:\n\t\t\tsuper().__init__(\"This coffee machine has to be repaired.\")\n\n\tdef\trepair(self):\n\t\tif self.__drinks_served == BEV_SERVED_BEFORE_FAILURE:\n\t\t\tself.__drinks_served = 0\n\n\tdef\tserve(self, beverage : TypeVar('T', bound=HotBeverage)):\n\t\tif self.__drinks_served == BEV_SERVED_BEFORE_FAILURE:\n\t\t\traise self.BrokenMachineException()\n\t\tself.__drinks_served += 1\n\t\tif random.randrange(1, 3) == 1:\n\t\t\treturn (self.EmptyCup())\n\t\telse:\n\t\t\treturn (beverage())\n\ncheap_machine = CoffeeMachine()\n\n\n########################################### TESTS #############################################\nwhile True:\n\twhile True:\n\t\tos.system(\"clear\")\n\t\taction = input(\"Are we going back to work? 
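The day-13 record's result_to_compare is the standard trick for sorting with a three-valued validator: map True/None/False onto the -1/0/1 contract that functools.cmp_to_key expects. The same pattern in isolation:

from functools import cmp_to_key

def tri_state_to_cmp(result):
    # "in order" -> -1, "tie/undecided" -> 0, "out of order" -> 1
    if result is None:
        return 0
    return -1 if result else 1

items = [3, 1, 2]
items.sort(key=cmp_to_key(
    lambda a, b: tri_state_to_cmp(None if a == b else a < b)))
assert items == [1, 2, 3]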
(yes/no)\\n\")\n\t\tif action == \"yes\" or action == \"no\":\n\t\t\tbreak\n\t\tprint(\"please enter a valid choice : yes/no\\n\")\n\t\tinput(\"press any key to continue...\")\n\n\tif action == \"yes\":\n\t\tprint(\"See you later pal!\\n\")\n\t\tsleep(0.5)\n\t\tbreak\n\n\twhile True:\n\t\tprint (\"\\nMenu:\")\n\t\tprint(\"\\n\")\n\t\tprint(hot_beverage)\n\t\tprint(\"\\n\")\n\t\tprint(coffee)\n\t\tprint(\"\\n\")\n\t\tprint(tea)\n\t\tprint(\"\\n\")\n\t\tprint(chocolate)\n\t\tprint(\"\\n\")\n\t\tprint(cappuccino)\n\t\tprint(\"\\n\")\n\t\t\n\t\tsleep(0.5)\n\t\tbeverage = input(\"\\nChoose a beverage\\n\")\n\t\ttry:\n\t\t\tif (beverage == \"hot beverage\"):\n\t\t\t\tprint(\"Here you are: \" + cheap_machine.serve(HotBeverage).description())\n\t\t\t\tprint(\"\\n\")\n\t\t\t\tinput(\"press any key to continue...\")\n\t\t\t\tbreak\n\t\t\telif (beverage == \"coffee\"):\n\t\t\t\tprint(\"Here you are: \" + cheap_machine.serve(Coffee).description())\n\t\t\t\tprint(\"\\n\")\n\t\t\t\tinput(\"press any key to continue...\")\n\t\t\t\tbreak\n\t\t\telif (beverage == \"tea\"):\n\t\t\t\tprint(\"Here you are: \" + cheap_machine.serve(Tea).description())\n\t\t\t\tprint(\"\\n\")\n\t\t\t\tinput(\"press any key to continue...\")\n\t\t\t\tbreak\n\t\t\telif (beverage == \"chocolate\"):\n\t\t\t\tprint(\"Here you are: \" + cheap_machine.serve(Chocolate).description())\n\t\t\t\tprint(\"\\n\")\n\t\t\t\tinput(\"press any key to continue...\")\n\t\t\t\tbreak\n\t\t\telif (beverage == \"cappuccino\"):\n\t\t\t\tprint(\"Here you are: \" + cheap_machine.serve(Cappuccino).description())\n\t\t\t\tprint(\"\\n\")\n\t\t\t\tinput(\"press any key to continue...\")\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"please, choose a correct beverage\\n\")\n\t\t\t\tinput(\"press any key to continue...\")\n\t\t\t\tos.system(\"clear\")\n\t\t\t\tcontinue\n\t\texcept CoffeeMachine.BrokenMachineException as bme:\n\t\t\tprint(bme)\n\t\t\tprint(\"Waiting for the guy...\")\n\t\t\tsleep(2)\n\t\t\tprint(\"repairing...\")\n\t\t\tcheap_machine.repair()\n\t\t\tsleep(3)\n\t\t\tprint(\"repaired!\\n\\n\")\n\t\t\tinput(\"press any key to continue...\")\n\t\t\tbreak\n","repo_name":"mmarinel/Piscine_Python","sub_path":"week1/ex09/machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33303444956","text":"from math import sin, pi\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nFPS = 30\r\nSCREEN_HIGHT = 720\r\nSCREEN_WIDTH = 1280\r\nBOUNDS = 30\r\nBACKGROUND = \"background.png\"\r\nMUSIC = \"Laurindo Almeida The Lamp Is Low.wav\"\r\nSnowflakeCount = 400","repo_name":"Mukhammedali22/fun-projects","sub_path":"fallingSnow/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"36822193608","text":"from django import forms\nfrom .models import RecordModel , PostModel\n\nclass RecordForm(forms.ModelForm):\n name = forms.CharField(initial='Your name', label='Your Name')\n email=forms.EmailField(initial=\"your Email Id\")\n label={'amount':'Your Amount'}\n class Meta:\n model=RecordModel\n exclude=['user','slug','created_at']\n widgets={\n 'SubjectModel':forms.CheckboxSelectMultiple(attrs={'multiple':True,}),\n 'GroupModel':forms.CheckboxSelectMultiple(attrs={'multiple':True,}),\n \n }\n labels={ 'subject': 'Your Subjects : ', 'group':'Your Group', 'medium':'Select Medium'\n\n }\n #def __init__ (self, 
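One typing nit in the coffee-machine record: TypeVar('T', bound=HotBeverage) is created inline inside the annotation, so it constrains nothing. The conventional spelling for "accepts a class object, returns an instance of that class" declares the TypeVar once at module level, roughly:

from typing import Type, TypeVar

class HotBeverage: ...
class Tea(HotBeverage): ...

T = TypeVar("T", bound=HotBeverage)   # declared once, reused below

def serve(beverage: Type[T]) -> T:
    # Accepts the class itself; the checker infers the instance type.
    return beverage()

assert isinstance(serve(Tea), Tea)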
*args, **kwargs):\n # super(). __init__(*args, **kwargs)\n # self.fields['name'].label=\"Your Name\"\n #self.fields['address'].initial='My Present Address : '\n \n \nclass PostForm(forms.ModelForm):\n \n class Meta:\n model =PostModel\n exclude=['user','created_at']\n\n ","repo_name":"khurshed20/Django_project","sub_path":"first_project/education/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37844471620","text":"def game_board():\n board = []\n for square in range(0, 9):\n board.append(' ')\n return board\n\n\ndef print_game_board(board): # board = input cells\n print('-' * 9)\n for i in range(0, len(board), 3):\n print('|', *board[i: i + 3], '|')\n print('-' * 9)\n\n\ndef winner(board):\n done = False\n row = [board[:3], board[3:6], board[6:]]\n column = [board[:7:3], board[1:8:3], board[2::3]]\n diagonal = [board[0::4], board[2:7:2]]\n\n check_win = row + column + diagonal\n count_x = board.count('X')\n count_o = board.count('O')\n\n if (['X', 'X', 'X'] in check_win and ['O', 'O', 'O'] in check_win) or abs(count_x-count_o > 1):\n print('Impossible')\n done = True\n elif ['X', 'X', 'X'] in check_win:\n print('X wins')\n done = True\n elif ['O', 'O', 'O'] in check_win:\n print('O wins')\n done = True\n elif board.count(' ') > 0:\n print('Game not finished')\n done = False\n else:\n print('Draw')\n done = True\n return done\n\n\ndef game_done(board):\n done = False\n row = board[:3], board[3:6], board[6:]\n column = board[:7:3], board[1:8:3], board[2::3]\n diagonal = board[0::4], board[2:7:2]\n\n check_win = row + column + diagonal\n count_x = board.count('X')\n count_o = board.count('O')\n if (['X', 'X', 'X'] in check_win and ['O', 'O', 'O'] in check_win) or abs(count_x-count_o > 1):\n done = True\n elif ['X', 'X', 'X'] in check_win:\n done = True\n elif ['O', 'O', 'O'] in check_win:\n done = True\n elif board.count(' ') > 0:\n done = False\n else:\n done = True\n return done\n\n\ncoordinates = ['1 3', '2 3', '3 3', '1 2', '2 2', '3 2', '1 1', '2 1', '3 1']\n\n\ndef check(user_coordinate, board):\n if not user_coordinate.split()[0].isdigit() or not user_coordinate.split()[1].isdigit():\n print('You should enter numbers!')\n return False\n elif int(user_coordinate.split()[0]) > 3 or int(user_coordinate.split()[1]) > 3:\n print('Coordinates should be from 1 to 3!')\n return False\n elif board[coordinates.index(user_coordinate)] != ' ': # this is not working. fml\n print('This cell is occupied! 
Choose another one!')\n return False\n else:\n return None\n\n\ndef next_turn(tur):\n if tur == 'X':\n return 'O'\n else:\n return 'X'\n\n\ndef main():\n new_game_board = game_board()\n print_game_board(new_game_board)\n turn = 'X'\n\n while game_done(new_game_board) is False:\n user_coor_input = input('Enter coordinate: ')\n\n while check(user_coor_input, new_game_board) is False:\n user_coor_input = input('Enter coordinate: ')\n print(check(user_coor_input, new_game_board))\n\n if turn == 'X':\n new_game_board[coordinates.index(user_coor_input)] = 'X'\n else:\n new_game_board[coordinates.index(user_coor_input)] = 'O'\n print_game_board(new_game_board)\n turn = next_turn(turn)\n\n if winner(new_game_board):\n break\n\n\nmain()\n\n","repo_name":"helenlavr/Tic-Tac-Toe","sub_path":"Tic-Tac-Toe/task/tictactoe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17815527945","text":"from flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import create_engine, MetaData, Table, Column, Integer, String\n\ndb = SQLAlchemy()\n\nengine = create_engine(\"mysql+pymysql://root:123456@localhost:3306/mytest?charset=utf8\", echo=True, pool_size=8,\n\t\t\t\t\t pool_recycle=60 * 30)\nmetadata = MetaData(engine)\n\nstudent = Table('student', metadata,\n\t\t\t\tColumn('id', Integer, primary_key=True),\n\t\t\t\tColumn('name', String(50)),\n\t\t\t\tColumn('age', Integer),\n\t\t\t\tColumn('address', String(10)),\n\t\t\t\tColumn('teacher_id', Integer),\n\t\t\t\t)\n\nteacher = Table('teacher', metadata,\n\t\t\t\tColumn('teacher_id', Integer, primary_key=True),\n\t\t\t\tColumn('name', String(50)),\n\t\t\t\tColumn('age', Integer),\n\t\t\t\tColumn('address', String(10)),\n\t\t\t\t)\n","repo_name":"ocean-zhouyang/flaskProject","sub_path":"app/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"807158286","text":"import socket\r\n\r\nip = input(\"Enter the IP address of the server: 192.168.\")\r\nif ip != \"\":\r\n ip = \"192.168.\" + ip\r\nelse:\r\n ip = input(\"Enter the IP address of the server:\")\r\n\r\nBUFFER_SIZE = 550\r\n\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.connect((ip, 25565))\r\ns.send(b\"\\xfe\") # Minecraft server ping packet\r\ndata = s.recv(BUFFER_SIZE)\r\ns.close()\r\n\r\nassert data[0] == 255 # test for valid response\r\ninfo = data[3:].split(b\"\\xa7\") # split the data into a list\r\n\r\nfor i in range(len(info)):\r\n info[i] = info[i].decode(\"ascii\", errors=\"replace\")\r\n info[i] = info[i].replace(\"\\x00\", \"\")\r\n\r\nprint(\"############################################\")\r\nprint(\"Ping response from \" + ip + \":\")\r\nprint(info[0], f\"[{info[1]}/{info[2]}]\")\r\nprint(\"############################################\")\r\n","repo_name":"KAJIKK/minecraft-get-motd","sub_path":"minecraftMotd.py","file_name":"minecraftMotd.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27744376956","text":"#at begining you can set parameters in config file\n\n# to run simulation you need initial networks ( just simply define nodes and addd to network)\nimport staticnet as initialnetwork\nimport gui\nimport config\nimport report\nimport logger\n\n#initialnetwork.net1.introduce_self()\nnet1 = 
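Both winner and game_done in the tic-tac-toe record test abs(count_x-count_o > 1), which applies abs() to a boolean: the expression is 1 only when X leads by more than one move, so a board where O over-played is never flagged as impossible. The intended predicate, as a hedged correction:

def impossible_counts(count_x: int, count_o: int) -> bool:
    # abs() must wrap the difference, not the comparison result.
    return abs(count_x - count_o) > 1

assert impossible_counts(5, 2)        # X over-played
assert impossible_counts(2, 5)        # O over-played; the original misses this
assert not impossible_counts(3, 2)    # normal alternation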
initialnetwork.net1\nenv = initialnetwork.env\ngraphi = gui.graphic(net1)\n\n\n# in second step you need and algorithm\n#logger.logger.log(str(\"__________________________LEACH___________________________________________ start\"))\nprint(\"__________________________LEACH___________________________________________ start\\n\\n\")\nimport LEACH \n\nLEACH1 = LEACH.LEACHC(env,net1)\n#LEACH1.global_cluster_fromation(env)\nLEACH1.Random_Clusterhead_Selection(env,net1)\n#logger.logger.log(str(\"__________________________LEACH___________________________________________ end\"))\nprint(\"__________________________LEACH___________________________________________ end\\n\\n\")\n\n\nprint(\"++++++++++++++++++++++++++++++++++++++++++++++++++\")\nnet1.introduce_yourself()\ngraphi.draw()\n#logger.logger.log(str(\"++++++++++++++++++++++++++++++++++++++++++++++++++ run begin ++++++++++++++++++++++++\"))\nprint(\"++++++++++++++++++++++++++++++++++++++++++++++++++ run begin ++++++++++++++++++++++++\")\n\nenv.run(until=config.MAX_RUNTIME)\n#logger.logger.log(str(\"++++++++++++++++++++++++++++++++++++++++++++++++++ run end ++++++++++++++++++++++++\"))\nprint(\"++++++++++++++++++++++++++++++++++++++++++++++++++ run end ++++++++++++++++++++++++\")\n\n\nnet1.network_packet_summery()\n\n# for n in net1.nodes:\n# print(n,n.TDMA,n.is_CH,n.cluster,\" \",n.distance)\n\n\n# print(net1.clusters)\n# print(net1.clusterheads)\n\n# print(net1.nodes[0].inbox)\nnet1.introduce_yourself() \n\n# net1.network_outboxes()\n# net1.network_inboxes()\n\nreport.plotenergy()\nreport.plotpacket()\n\n","repo_name":"amirinia/pynet2","sub_path":"staticrun.py","file_name":"staticrun.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"10015808299","text":"# importing libraries\nfrom collections import defaultdict\nfrom typing import Any, Union\n\nimport numpy as np\nimport numpy.typing as npt\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom rouge import Rouge\nfrom text_analytics.config import DATA_PATH\n\n\nclass ExtractiveTextSummarizer:\n def __init__(self, article: Union[str, pd.DataFrame]) -> None:\n self.article = article\n self.frequency_table = defaultdict(int)\n self.rouge = Rouge()\n\n def _create_dictionary_table(self, stemmer: Any = None) -> dict:\n\n # removing stop words\n stop_words = set(stopwords.words(\"english\"))\n word_vector = word_tokenize(self.article)\n\n # instantiate the stemmer\n if stemmer is None:\n stemmer = PorterStemmer()\n\n stemmed_word_vector = [stemmer.stem(word) for word in word_vector]\n for word in stemmed_word_vector:\n if word not in stop_words:\n self.frequency_table[word] += 1\n\n return self.frequency_table\n\n def _calculate_sentence_scores(self, sentences: npt.ArrayLike) -> dict:\n\n # algorithm for scoring a sentence by its words\n sentence_weights = defaultdict(int)\n\n for sentence in sentences:\n sentence_wordcount_without_stop_words = 0\n\n for word_weight in self.frequency_table:\n sentence_weights[sentence[:7]] += self.frequency_table[word_weight]\n\n if word_weight in sentence.lower():\n sentence_wordcount_without_stop_words += 1\n\n sentence_weights[sentence[:7]] /= sentence_wordcount_without_stop_words\n\n return sentence_weights\n\n def _calculate_threshold_score(self, sentence_weight: dict) -> float:\n return np.mean(list(sentence_weight.values()))\n\n def _get_article_summary(\n 
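The LEACH driver above runs env.run(until=config.MAX_RUNTIME), which matches the SimPy discrete-event API; whether staticnet actually builds a SimPy environment is an assumption here. A minimal sketch of that bounded-run pattern under that assumption:

import simpy

def heartbeat(env, period):
    # A trivial process: wake up every `period` time units.
    while True:
        print(f"t={env.now}: heartbeat")
        yield env.timeout(period)

env = simpy.Environment()
env.process(heartbeat(env, 2))
env.run(until=6)   # same run-until pattern as the record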
self, sentences: npt.ArrayLike, sentence_weights: dict, threshold: float\n ) -> str:\n article_summary = [\n sentence\n for sentence in sentences\n if sentence[:7] in sentence_weights\n and sentence_weights.get(sentence[:7]) >= threshold\n ]\n\n return \" \".join(article_summary)\n\n def run_article_summary(self):\n\n # creating a dictionary for the word frequency table\n _ = self._create_dictionary_table()\n\n # tokenizing the sentences\n sentences = sent_tokenize(self.article)\n\n # algorithm for scoring a sentence by its words\n sentence_scores = self._calculate_sentence_scores(sentences)\n\n # getting the threshold\n threshold = self._calculate_threshold_score(sentence_scores)\n\n # producing the summary\n article_summary = self._get_article_summary(\n sentences, sentence_scores, 0.95 * threshold\n )\n\n return article_summary\n\n def get_rouge_score(\n self, hypothesis_text: str, reference_text: str\n ) -> npt.ArrayLike:\n scores = self.rouge.get_scores(hypothesis_text, reference_text)\n return scores\n\n\nif __name__ == \"__main__\":\n\n df = pd.read_csv(DATA_PATH / \"review_evaluation.csv\")\n result = []\n articles = df.loc[:, \"review\"]\n reference_summary = df.loc[:, \"Summary\"]\n\n for review, reference_text in zip(articles, reference_summary):\n extractive_summarizer = ExtractiveTextSummarizer(article=review)\n print(f\"Original Review: \\n{review}\")\n print(\"-\" * 200)\n review_summary = extractive_summarizer.run_article_summary()\n result.append(review_summary)\n\n rouge_score = extractive_summarizer.get_rouge_score(\n hypothesis_text=review_summary, reference_text=reference_text\n )\n\n print(f\"Summarised Review: \\n{review_summary}\")\n print(\"-\" * 200)\n print(rouge_score)\n","repo_name":"carolinedlu/text_analytics","sub_path":"text_analytics/text_summarisation/ext_text_summariser.py","file_name":"ext_text_summariser.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33556278146","text":"# File Name : hex2bin.py\n# Author : Benature\n# Create Date: 2019.09.29 周日\n\n# 16进制转2进制\n# 计算误码率\n\nimport math\nimport numpy as np\n\npath_r = \"../data/normal/receiver_sift.bin\"\npath_t = \"../data/normal/transmitter_sift.bin\"\n\nwith open(path_r , \"rb\") as f1:\n receiver = f1.read()\nwith open(path_t , \"rb\") as f2:\n transmitter = f2.read()\n\ndef hex2bin(sift):\n out = []\n for he in sift:\n bi = \"{:0>8}\".format(bin(he)[2:])\n bi = np.array(list(bi))\n out.append(bi)\n return out\n\nsift_r = np.array(hex2bin(receiver))\nsift_t = np.array(hex2bin(transmitter))\n\njudge = (sift_r == sift_t)\n\ncorrect = np.sum(judge)\ntotal = judge.shape[0] * judge.shape[1]\n\nprint(\"误码率:\", (total - correct) / total)","repo_name":"Benature/lazy-kit","sub_path":"other/hex2bin.py","file_name":"hex2bin.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"118596297","text":"import claim_repo\n\n\n\n\ndef create(): \n \n claimID = (input('ClaimID: '))\n claimtype = input('Enter type of claim: ')\n description = input('Description: ')\n claimamount = (input('amount:'))\n dateofincident = input('date of incident: ')\n dateofclaim = input('date of claim: ')\n isvalid = input('is this claim valid yes or no: ')\n\n claim_repo.create_claim(claimID, claimtype, description, claimamount, dateofincident, dateofclaim, isvalid) \n\nwhile True:\n\n x = input('1.add new claim\\n'+'2.view 
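Two details of the summarizer record are worth isolating: sentence scores are keyed on sentence[:7], so any two sentences sharing a 7-character prefix collide, and a sentence survives only if its score clears 95% of the mean score. The selection rule itself, with invented scores:

import numpy as np

scores = {"The plot ": 4.2, "I liked ": 1.1, "Acting w": 3.8}
threshold = 0.95 * np.mean(list(scores.values()))   # ~2.88 here
kept = [key for key, score in scores.items() if score >= threshold]
assert kept == ["The plot ", "Acting w"]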
claims\\n'+'3.take care of next claim\\n')\n \n if x == '1':\n create()\n elif x == '2':\n print (f'ClaimID,','claimtype,','description,','date of incident,','date of claim,', 'valid or nah') \n print(claim_repo.all_claims)\n \n elif x == '3':\n print (claim_repo.all_claims[0])\n y = input('take care of this claim right meow? Y/N: ').lower()\n if y == 'y':\n claim_repo.take_care()\n elif y == 'n':\n pass\n","repo_name":"loganchaves/gold_challenges","sub_path":"challenge_2/claim_ui.py","file_name":"claim_ui.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34238125900","text":"\n# 반복문\n# :만족할때까지반복\n#while 조건식:\n# 실행문장\n\n# for 변수 in (반복가능한객체):\n# 반복 실행할 문장\n\n\ni = 1\nwhile i <= 10:\n print(i,end=' , ' )\n i = i + 1\n \n #1+2+3+...........+10\n\n\nprint('\\n')#앤터\n \na=1\nsum = 0\nwhile a <= 10:\n sum += a\n print(a, end= ' ')\n if(a != 10):\n print( \"+\", end=' ' )\n a = a+1\nprint('sum = {}'.format(sum))","repo_name":"rlaqjatjr8922/sbs_python","sub_path":"파이썬/파이썬/Dey.06/ex.03.py","file_name":"ex.03.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"485431507","text":"from typing import Tuple\nfrom abc import ABC, abstractmethod\nfrom time import time\n\nfrom .message import Message\n\nAddress = Tuple[str, int]\n\n\nclass Protocol(ABC):\n name: str = 'protocol name' # Protocol name (for finding related messages)\n\n def __init__(self, node, require_heartbeat: bool = False, heartbeat_interval: float = None):\n assert not require_heartbeat or heartbeat_interval is not None, \"heartbeat interval is required!\"\n\n self.node = node\n self.require_heartbeat = require_heartbeat\n self.last_heartbeat = time()\n self.heartbeat_interval = heartbeat_interval\n\n @abstractmethod\n def handle(self, sender: Address, message: Message):\n raise NotImplementedError\n\n def heartbeat(self):\n self.last_heartbeat = time()\n\n def __contains__(self, message: Message):\n return message.dict_message.get(\"protocol\", None) == self.__class__.name\n","repo_name":"keeplerteam/thekpi","sub_path":"src/p2p_network/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"12389652515","text":"from utils import *\n\nSAMPLE = False\nints = read_lines(input_for(__file__, SAMPLE), int)\n\nprint(ints)\n\nfor i in ints:\n for j in ints:\n if i + j == 2020:\n print(i * j)\n\nfor i in ints:\n for j in ints:\n for k in ints: \n if i + j + k == 2020:\n print(i * j * k)","repo_name":"jppellet/adventofcode","sub_path":"2020_01.py","file_name":"2020_01.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"24063130866","text":"#Used to group data from the same trading codes\r\ndef Get_PTOEXE(E):\r\n\treturn E.PTOEXE\r\ndef Get_CodeOfTrading(E):\r\n\treturn E.CodeOfTrading\r\nclass DayInformation:\r\n\tdef __init__(self, phrase):\r\n\t\tself.RegisterType = phrase[0:2]\r\n\t\tself.Date = phrase[2:10]\r\n\t\tself.CodeBDI= phrase[10:12]\r\n\t\tself.CodeOfTrading= phrase[12:24]\r\n\t\tself.MarketType\t= phrase[24:27]\r\n\t\tself.ShortName = phrase[27:39]\r\n\t\tself.SpecificationOfPaper = phrase[39:49]\r\n\t\tself.DeadlineMarket\t= phrase[49:52]\r\n\t\tself.ReferenceCurrency = 
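The 2020_01 record scans all pairs and triples, which is quadratic/cubic but fine at puzzle size. For reference, the pair search collapses to one pass with a set of complements (the numbers below are the puzzle's well-known sample input):

ints = [1721, 979, 366, 299, 675, 1456]
seen = set()
for i in ints:
    if 2020 - i in seen:
        print(i * (2020 - i))   # 514579 for this sample
    seen.add(i)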
phrase[52:56]\r\n\t\tself.OpeningPeice = phrase[56:69]\r\n\t\tself.MaximimPrice = phrase[69:82]\r\n\t\tself.MinimumPrice = phrase[82:95]\r\n\t\tself.AveragePrice\t= phrase[95:108]\r\n\t\tself.LastPric = phrase[108:121]\r\n\t\tself.BestPriceBuy = phrase[121:134]\r\n\t\tself.BestPriceSale = phrase[134:147]\r\n\t\tself.NumberOfBusiness = phrase[147:152]\r\n\t\tself.TotalQuantityRoleMarket= phrase[152:170]\r\n\t\tself.TotalVolumeRoleMarket= phrase[170:188]\r\n\t\tself.PREEXE\t= phrase[188:202]\r\n\t\tself.INDOPC\t= phrase[202:202]\r\n\t\tself.DATVEN\t= phrase[203:211]\r\n\t\tself.FATCOT\t= phrase[211:218]\r\n\t\tself.FATCOT\t= phrase[218:231]\r\n\t\tself.PTOEXE\t= phrase[231:243]\r\n\t\tself.DISMES\t= phrase[243:246]","repo_name":"LeandroBruscato/Bovespa_Txt_to_SQLite","sub_path":"DayInformation.py","file_name":"DayInformation.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6198792368","text":"'''\n33. Write a Python program to sum of three given integers. However, if two values are equal sum will be zero.\n\n'''\n\nmy_list = []\nisEqual = False\nsum = 0\ni = 0\n\nwhile i < 3:\n number = int(input(\"Please enter a number : \"))\n sum += number\n if(number in my_list):\n isEqual = True\n my_list.append(number)\n i += 1\n\nif(isEqual):\n sum = 0\n\nprint(sum)\n\n\n\n\n\n\n\n \n\n\n\n\n\n ","repo_name":"ErenBtrk/Python-Exercises-2","sub_path":"Exercise33.py","file_name":"Exercise33.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26273138117","text":"class Solution:\n def convertToTitle(self, n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n str = \"\"\n while n != 0:\n n -= 1\n str += chr(ord('A') + int(n % 26))\n n = int(n / 26)\n str = str[::-1]\n return str\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.convertToTitle(26))\n","repo_name":"Phil2ng/LeetCode-by-Python","sub_path":"168. Excel Sheet Column Title.py","file_name":"168. 
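Hand-written slices like those in the DayInformation record make two slips easy to miss: INDOPC reads the empty slice phrase[202:202] (a one-character flag should be [202:203], filling the gap before DATVEN at 203), and FATCOT is assigned twice, so the first value is silently overwritten. A table-driven layout makes gaps and overlaps auditable; the field names and offsets below echo the record and are illustrative only:

LAYOUT = [
    ("RegisterType", 0, 2),
    ("Date", 2, 10),
    ("CodeBDI", 10, 12),
    ("PREEXE", 188, 202),
    ("INDOPC", 202, 203),   # one character, not the empty [202:202]
    ("DATVEN", 203, 211),
]

def parse(phrase: str) -> dict:
    return {name: phrase[start:end] for name, start, end in LAYOUT}

assert len(parse("x" * 246)["INDOPC"]) == 1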
Excel Sheet Column Title.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40099313095","text":"import urllib3\n\nurllib3.disable_warnings()\nhttp = urllib3.PoolManager()\nfrom PIL import Image\nfrom colorama import init, Fore, Back, Style\n\ninit()\nfrom time import sleep\nimport os, sys\nimport math\nimport io\nfrom os import listdir\nimport glob\nfrom requests import get\n\nprint(Fore.LIGHTMAGENTA_EX + \"\\n\"\n f'Started New Cosmetics Generator | Made by Kraypex')\n\n\ndef gen():\n res = get('https://benbot.app/api/v1/newCosmetics').json()['items']\n for i in res:\n name = i['name']\n desc = i['description']\n rarity = i['rarity']\n img = i['icons']['icon']\n id = i['id']\n print(Fore.CYAN + '[NCG] ', end=\"\")\n print(Fore.GREEN + f'Name: {name} Desc: {desc} Rarity: {rarity}')\n try:\n img = Image.open(io.BytesIO(http.urlopen(\"GET\", img).data))\n img.save(f\"Output/{id}.png\")\n except:\n img = 'your backup image url'\n img = Image.open(io.BytesIO(http.urlopen(\"GET\", img).data))\n img.save(f\"Output/{id}.png\")\n\n\ndef merge():\n try:\n print(Fore.CYAN)\n images_to_len = [file for file in listdir('Output')]\n x = len(images_to_len)\n folder = os.path.join(os.getcwd(), \"Output\")\n img_list = os.listdir(folder)\n img_list1 = []\n for i in img_list:\n img_list1.append(os.path.join(folder, i))\n\n for i in range(len(img_list1)):\n img_list1[i] = Image.open(img_list1[i])\n img_list1[i] = img_list1[i].resize((512, 512))\n if x <= 25:\n height = math.ceil(len(img_list1) / 5)\n width_accroding_to_len = 2560\n number_of_if = 5\n elif 35 > x > 25:\n height = math.ceil(len(img_list1) / 6)\n width_accroding_to_len = 3072\n number_of_if = 6\n elif 50 >= x > 35:\n height = math.ceil(len(img_list1) / 8)\n width_accroding_to_len = 4096\n number_of_if = 8\n elif x < 15:\n height = math.ceil(len(img_list1) / 4)\n width_accroding_to_len = 2048\n number_of_if = 4\n elif x > 50:\n height = math.ceil(len(img_list1) / 9)\n number_of_if = 9\n width_accroding_to_len = 4608\n\n new = Image.new(\"RGBA\", (width_accroding_to_len, 512 * height))\n\n w = 0\n h = 0\n for i in img_list1:\n new.paste(i, (512 * w, 512 * h))\n w = w + 1\n if w == number_of_if:\n w = 0\n h = h + 1\n new = new.save('Output\\merged.png')\n print(f\"{Fore.LIGHTCYAN_EX}-> All done the image is merged.\")\n except:\n print(\n f\"{Fore.RED}-> Merge Failed (Please check if the folder is empty or not)\"\n )\n\n\nprint(Fore.CYAN + '\\nDo you want to:')\nprint(Fore.LIGHTGREEN_EX)\nprint(\" (1) Grab all new cosmetics from the API \")\nprint(\" (2) Delete all the content of Output Folder \")\nprint(\" (3) Merge all images in Output File \")\nask = (input(\"- >>> \"))\n\nif ask == \"3\":\n merge()\n print(f\"{Fore.CYAN} All image merged...Closing...\")\n sleep(3)\n sys.exit()\nif ask == \"2\":\n del_files = glob.glob('output/*')\n for file in del_files:\n os.remove(file)\n\n print(f\"{Fore.LIGHTRED_EX}-> Deleting old content... \")\n print(f\"{Fore.RED}-> Content deleted... \\n\"\n ) #this part for delete the content of the [output]\n print(Fore.LIGHTGREEN_EX)\n ask_to_gen = (input(\n \"\\n-> Do you want to start Generate the items or Close [Gen] or [Close] >> \"\n ))\n if ask_to_gen == \"Gen\":\n print(f\"{Fore.YELLOW}-> GENERATION STARTED\\n\\n\")\n gen()\n print(\n \"\\n-> Do you want to merge the generated images ? 
y (yes) / n (no) : \"\n )\n ask_to_merge = (input(\">>>> \"))\n if ask_to_merge == \"y\":\n merge()\n elif ask_to_merge == \"n\":\n print(\"Not merging the generated images... Closing...\")\n sys.exit()\n else:\n print(\n f\"{Fore.LIGHTRED_EX}-> undefind answer please try again...closing\"\n )\n sleep(5)\n sys.exit()\n\n elif ask_to_gen == \"Close\":\n print(Fore.YELLOW)\n print(\"Closing the generator, thanks for using!\")\n sleep(2)\n sys.exit()\n\n else:\n print(\n f\"{Fore.LIGHTRED_EX}-> Undefind answer please try again...closing\")\n sleep(5)\n sys.exit()\nif ask == \"1\":\n print(f\"{Fore.YELLOW}-> Generation Started:\\n\\n\")\n gen()\n print(\n \"\\n-> Do you want to merge the generated images ? y (yes) / n (no) : \")\n ask_to_merge = (input(\">>>> \"))\n if ask_to_merge == \"y\":\n merge()\n elif ask_to_merge == \"n\":\n print(\"Not Merging the generated images... Closing...\")\n sys.exit()\n else:\n print(\n f\"{Fore.LIGHTRED_EX}-> Undefind answer please try again...closing\")\n sleep(5)\n sys.exit()\n","repo_name":"MrTahfari/Cosmetics","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21924306402","text":"\"\"\"\nYour chance to explore Loops and Turtles!\n\nAuthors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,\n their colleagues and Chip Daniel.\n\"\"\"\n########################################################################\n# DONE: 1.\n# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.\n########################################################################\n\n########################################################################\n# DONE: 2.\n#\n# You should have RUN the PREVIOUS module and READ its code.\n# (Do so now if you have not already done so.)\n#\n# Below this comment, add ANY CODE THAT YOUR WANT, as long as:\n# 1. You construct at least 2 rg.SimpleTurtle objects.\n# 2. Each rg.SimpleTurtle object draws something\n# (by moving, using its rg.Pen). ANYTHING is fine!\n# 3. Each rg.SimpleTurtle moves inside a LOOP.\n#\n# Be creative! Strive for way-cool pictures! 
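The merge step in the cosmetics record sizes its canvas from the icon count: with 512x512 tiles and cols icons per row, the canvas is cols*512 wide and ceil(n/cols)*512 tall, and each tile is pasted at (512*w, 512*h). The sizing rule on its own, checked against two of the record's branches:

import math

def canvas_size(n_tiles: int, cols: int, tile: int = 512):
    rows = math.ceil(n_tiles / cols)
    return cols * tile, rows * tile

assert canvas_size(23, 5) == (2560, 2560)   # <=25 icons -> 5 per row
assert canvas_size(40, 8) == (4096, 2560)   # 36..50 icons -> 8 per row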
Abstract pictures rule!\n#\n# If you make syntax (notational) errors, no worries -- get help\n# fixing them at either this session OR at the NEXT session.\n#\n# Don't forget to COMMIT your work by using VCS ~ Commit and Push.\n########################################################################\nimport rosegraphics as rg\nwindow = rg.TurtleWindow()\nwindow.delay(20)\n\n\nsize = 20\n\nfor k in range(10):\n\n sam= rg.SimpleTurtle()\n sam.pen = rg.Pen('green', 5)\n sam.speed = 1000\n sam.draw_regular_polygon(8,size)\n\n ben= rg.SimpleTurtle()\n ben.pen = rg.Pen('blue', 5)\n ben.speed = 1000\n ben.draw_regular_polygon(8,size + 3)\n size = size + 10\n\n nick= rg.SimpleTurtle()\n nick.pen = rg.Pen('red', 5)\n nick.speed = 1000\n nick.draw_regular_polygon(8, size + 6)\n\n\n\n\n","repo_name":"chipflw/IntroductionToPython","sub_path":"src/m5_your_turtles.py","file_name":"m5_your_turtles.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"35524908920","text":"from flask_bootstrap import Bootstrap\nfrom flask_cache import Cache\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\n\n# 创建sqlalchemy对象\ndb = SQLAlchemy()\n# 创建Migrate数据库迁移对象\nmigrate = Migrate()\n# 创建Cache对象,CACHE_TYPE参数有:\n# null: NullCache (default)\n# simple: SimpleCache\n# memcached: MemcachedCache (pylibmc or memcache required)\n# gaememcached: GAEMemcachedCache\n# redis: RedisCache (Werkzeug 0.7 required)\n# filesystem: FileSystemCache\n# saslmemcached: SASLMemcachedCache (pylibmc required)\ncache = Cache(config={\n 'CACHE_TYPE':'redis',\n 'CACHE_KEY_PREFIX':'python'\n})\n\ndef init_ext(app):\n # 初始化db\n db.init_app(app=app)\n # 初始化migrate\n migrate.init_app(app=app, db=db)\n # 初始化bootstrap对象,不需要在其他地方设置\n Bootstrap(app)\n # 初始化调试工具栏工具,debugtoolbar,不需要在其他地方设置\n DebugToolbarExtension(app)\n # 初始化缓存,flask-cache\n cache.init_app(app=app)\n\n","repo_name":"xzlmark/flask","sub_path":"flaskproject结构/App/extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23829875588","text":"import matplotlib.pyplot as plt\r\nfrom matplotlib.animation import ArtistAnimation\r\nimport requests\r\n\r\nurl = requests.get('http://judge2.vdi.mipt.ru/ejudge/lab_01_mpl/frames.dat')\r\nLines = url.text.split('\\n')\r\nN = len(Lines)//2\r\nX, Y = [[float(j) for j in Lines[2*i].split()] for i in range(N)], [[float(j) for j in Lines[2*i + 1].split()] for i in range(N)]\r\nframes = []\r\nfig = plt.figure()\r\n\r\nfor i in range(N):\r\n frame, = plt.plot(X[i], Y[i], 'r-')\r\n tx = plt.text(0.5, 10, \"frame \" + str(i+1), fontsize = 16, c = '#FF5500')\r\n frames.append([frame,tx])\r\n\r\nminy, maxy = min(Y[5]) - 0.1 * abs(max(Y[5]) - min(Y[5])), max(Y[5]) + 0.1 * abs(max(Y[5]) - min(Y[5]))\r\nplt.title(\"f(x)\")\r\nplt.xlabel(\"x\")\r\nplt.ylabel(\"y\")\r\nplt.axis([min(X[0]), max(X[0]), miny, maxy])\r\nplt.grid(True)\r\n\r\nanim = ArtistAnimation(fig, frames, interval=200)\r\nplt.show()","repo_name":"KirikMaster/MIPT-assignments","sub_path":"3 sem/Matplotlib_labs/Episode_02.py","file_name":"Episode_02.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4587194531","text":"import socket, select, sys\n\nclass Server():\n connected_clients = []\n\n def 
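The matplotlib record animates by collecting artists frame by frame. The core ArtistAnimation contract in its smallest form: each entry in frames is the list of artists to show for that frame, and the animation object must be kept alive:

import matplotlib.pyplot as plt
from matplotlib.animation import ArtistAnimation

fig = plt.figure()
frames = []
for i in range(3):
    line, = plt.plot([0, 1], [0, i], 'r-')   # comma unpacks the artist
    frames.append([line])                    # one artist list per frame
anim = ArtistAnimation(fig, frames, interval=200)
plt.show()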
shout(self, socket, message):\n for client in self.connected_clients:\n if client != socket:\n message = message.strip('\\n')\n client.send(message)\n\n def __init__(self, host, port):\n listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listen_socket.bind((host, int(port))) # this socket is bound to my port 9876\n listen_socket.listen(1) # specify the \"backlog\" for this socket\n\n while True:\n # create the input list\n read_list = [listen_socket, sys.stdin] + self.connected_clients\n (ready_list,_,_) = select.select(read_list,[],[])\n\n for ready in ready_list:\n if ready is listen_socket:\n conn, addr = ready.accept()\n self.connected_clients += [conn]\n elif ready == sys.stdin:\n msg = sys.stdin.readline()\n self.shout(listen_socket,msg)\n sys.stdout.write(\"\")\n sys.stdout.flush()\n else:\n data = ready.recv(1024)\n if len(data) == 0:\n self.connected_clients.remove(ready)\n else:\n sys.stdout.write(data.rstrip())\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n self.shout(ready, data.rstrip())\n","repo_name":"aleidaolvera/network_security","sub_path":"hw1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18515987806","text":"# https://leetcode.com/problems/balanced-binary-tree/\n\nfrom typing import Optional\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n \nclass Solution:\n def isBalanced(self, root: Optional[TreeNode]) -> bool:\n def check(root):\n if not root:\n return 0\n \n left = check(root.left)\n right = check(root.right)\n \n if left==-1 or right==-1 or abs(left-right)>1:\n return -1\n \n return max(left, right)+1\n return check(root)!=-1\n \n\nif __name__ == \"__main__\":\n '''\n root = TreeNode(3)\n root.left = TreeNode(9)\n root.right = TreeNode(20)\n root.right.left = TreeNode(15)\n root.right.right = TreeNode(7)'''\n \n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(2)\n root.left.left = TreeNode(3)\n root.left.right = TreeNode(3)\n root.left.left.left = TreeNode(4)\n root.left.left.right = TreeNode(4)\n \n sol = Solution()\n print(sol.isBalanced(root))","repo_name":"hanqpark/coding_test","sub_path":"interview/9. 
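The chat-server record multiplexes stdin, the listening socket, and every client through select.select. Its core guarantee, that only sockets which will not block come back in the ready list, can be seen with a socket pair (POSIX-style sketch):

import select
import socket

a, b = socket.socketpair()
b.send(b"ping")
ready, _, _ = select.select([a], [], [], 1.0)   # 1-second timeout
assert ready == [a]            # a has data, so recv() won't block
assert a.recv(4) == b"ping"
a.close(); b.close()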
Tree/failed/balanced-binary-tree.py","file_name":"balanced-binary-tree.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17894401265","text":"from os import getcwd\nfrom os.path import split, join\nimport json\nimport click\n\nGLOBAL_CONFIG = \".weaver_global.json\"\nCART_CONFIG = \"config.json\"\n\n\ndef _get_config(name):\n global_config, root = _locate_global_config()\n cart_config, cart_location = _locate_cart_config(root, name)\n\n config = global_config\n config.update(cart_config)\n\n config['fs_root'] = root\n config['fs_cart'] = cart_location\n config['short_name'] = name\n\n return config\n\n\ndef _locate_global_config():\n current_dir = getcwd()\n while True:\n try:\n return json.load(open(join(current_dir, GLOBAL_CONFIG))), current_dir\n except:\n old = current_dir\n current_dir, tail = split(current_dir)\n if current_dir == old:\n raise FileNotFoundError\n\n\ndef _locate_cart_config(root, name):\n cart_location = join(root, 'carts', name, CART_CONFIG)\n try:\n return json.load(open(cart_location)), cart_location\n except:\n click.echo(\"Could not find cart\")\n raise FileNotFoundError\n\n\n@click.group()\ndef cli():\n \"\"\"Welcome to pico dev tools\"\"\"\n\n\n@cli.command()\n@click.pass_context\ndef showcarts(ctx):\n click.echo(\"Show Carts\")\n\n\n@cli.command()\n@click.pass_context\n@click.argument(\"name\")\ndef build(ctx, name):\n click.echo(\"Building {}\".format(name))\n config = _get_config(name)\n print(config)\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"Marazan/pico8-dev-tools","sub_path":"weaver/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38104971310","text":"#Desafio do palíndromo\n\nx1=input('Digite uma frase ou palavra: ').strip()\n\ny1=x1.replace(' ','')\n#print(y1)\n\ny2=int(len(y1))\n#print(f'y2={y2}\\n')\n\ny3=0\n\nfor c in range(0,y2):\n if y1[c] == y1[y2 - 1 - c]:\n y3+=0\n else:\n y3+=1\nif y3==0:\n print('sim')\nelse:\n print('não')\n\n#for c in range(0,y2):\n# print(y1[c])\n# print(y1[y2-c-1])\n# if y1[c]==y1[y2-1-c]:\n# print('sim')\n# else:\n# print('não')\n","repo_name":"ToledoLBC/Aulas","sub_path":"Python/Exercícios Python/aulas/2.13 - desafio 53.py","file_name":"2.13 - desafio 53.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16790164337","text":"name = (input(\"Enter your name.\\n\"))\r\ncrush = input(\"Enter your crush's name\\n\")\r\niowercaseName = name.lower()\r\niowercaseCrush = crush.lower()\r\ntotaiPoints = 0\r\n\r\nif('t' in iowercaseName or 't' in iowercaseCrush):\r\n points = iowercaseName.count('t') + iowercaseCrush.count('t')\r\n totaiPoints += points\r\n print(f\"T occurs {points} times\")\r\n\r\nif('r' in iowercaseName or 'r' in iowercaseCrush):\r\n points = iowercaseName.count('r') + iowercaseCrush.count('r')\r\n totaiPoints += points\r\n print(f\"R occurs {points} times\")\r\n\r\nif('u' in iowercaseName or 'u' in iowercaseCrush):\r\n points = iowercaseName.count('u') + iowercaseCrush.count('u')\r\n totaiPoints += points\r\n print(f\"U occurs {points} times\")\r\n\r\nif('e' in iowercaseName or 'e' in iowercaseCrush):\r\n points = iowercaseName.count('e') + iowercaseCrush.count('e')\r\n totaiPoints += points\r\n print(f\"E occurs {points} times\")\r\n\r\nprint(f\"Total = 
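The balanced-tree record folds "subtree unbalanced" into the height return value: -1 is a sentinel that propagates straight up, so the tree is walked once (O(n)) rather than recomputing heights per node. The same idea with an explicit early exit, runnable without the LeetCode TreeNode:

from types import SimpleNamespace as N

def height_or_minus1(node):
    if node is None:
        return 0
    left = height_or_minus1(node.left)
    if left == -1:
        return -1          # short-circuit: skip the right subtree
    right = height_or_minus1(node.right)
    if right == -1 or abs(left - right) > 1:
        return -1
    return max(left, right) + 1

leaf = N(left=None, right=None)
skewed = N(left=N(left=leaf, right=None), right=None)   # left chain of 3
assert height_or_minus1(skewed) == -1                   # unbalanced
assert height_or_minus1(N(left=leaf, right=leaf)) == 2  # balanced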
{totaiPoints}\")\r\n\r\ntotaiPoints2 = 0\r\n\r\nif('l' in iowercaseName or 'l' in iowercaseCrush):\r\n points2 = iowercaseName.count('l') + iowercaseCrush.count('l')\r\n totaiPoints2 += points2\r\n print(f\"L occurs {points2} times\")\r\n\r\nif('o' in iowercaseName or 'o' in iowercaseCrush):\r\n points2 = iowercaseName.count('o') + iowercaseCrush.count('o')\r\n totaiPoints2 += points2\r\n print(f\"O occurs {points2} times\")\r\n\r\nif('v' in iowercaseName or 'v' in iowercaseCrush):\r\n points2 = iowercaseName.count('v') + iowercaseCrush.count('v')\r\n totaiPoints2 += points2\r\n print(f\"V occurs {points2} times\")\r\n\r\nif('e' in iowercaseName or 'e' in iowercaseCrush):\r\n points2 = iowercaseName.count('e') + iowercaseCrush.count('e')\r\n totaiPoints2 += points2\r\n print(f\"E occurs {points2} times\")\r\n\r\nprint(f\"Total = {totaiPoints2}\")\r\n\r\ngrandTotal = str(totaiPoints) + str(totaiPoints2)\r\nfinalTotal = int(grandTotal)\r\n\r\nif(finalTotal < 10 or finalTotal > 90):\r\n print(f\"Your score is {finalTotal}, you go together like coke and mentos.\")\r\n\r\nelif(finalTotal > 40 and finalTotal < 50):\r\n print(f\"Your score is {finalTotal}, you are alright together.\")\r\n\r\nelse:\r\n print(f\"Your score is {finalTotal}.\")","repo_name":"ZacharyFulce/Python","sub_path":"100DaysOfCode/Day 3/loveCalculator.py","file_name":"loveCalculator.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6903916680","text":"from django.shortcuts import render\nfrom .utils import unit_conversion\nfrom django.http import JsonResponse\n\nfrom django.views.decorators.csrf import csrf_exempt\n# Create your views here.\n\nimport json\nimport urllib.request\nimport logging\n\n# Create your views here.\n@csrf_exempt\ndef index(request):\n if request.method == 'POST':\n post = json.loads(request.body.decode('utf-8'))\n city = post.get('city')\n temp_unit = post.get('temp_unit', \"C\")\n wind_unit = post.get('wind_unit', \"kmh\")\n logging.basicConfig(level=logging.INFO)\n logging.info(request.POST)\n logging.info(city)\n logging.info(temp_unit)\n logging.info(wind_unit)\n logging.info(request.body)\n\n #I can probably do this with the requests moduleS\n res = urllib.request.urlopen('http://api.openweathermap.org/data/2.5/weather?q='+str(city)+'&appid=cb771e45ac79a4e8e2205c0ce66ff633').read()\n #logging.info(res)\n json_data = json.loads(res)\n \n data = {\n \"country_code\": str(json_data['sys']['country']),\n \"main\" : json_data[\"weather\"][0][\"description\"],\n \"coordinate\": str(json_data['coord']['lon']) + ' ' +\n str(json_data['coord']['lat']),\n \"temp\": unit_conversion(json_data['main']['temp'], temp_unit), #TRY TO MAKE AN OPTION TO CHANGE THE UNIT\n \"wind\" : unit_conversion(json_data['wind']['speed'], wind_unit),\n \"pressure\": str(json_data['main']['pressure']),\n \"humidity\": str(json_data['main']['humidity']),\n } \n logging.info(data)\n\n else:\n city = ''\n data = {}\n return JsonResponse(data) #render(request, 'index.html', {'city': city, 'data': data})","repo_name":"JaviMaligno/DjangoTutorial","sub_path":"weatherapp/weatherdetector/weather/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15933353604","text":"import sys\n\n#sales tax for Seattle\nDEFAULT_SALES_TAX = 0.101\n#number of bonus stars per $1 pretax spent\nDEFAULT_BONUS_STARS_PER_UNIT = 
2\n#ratio of bonus stars : $ amount pretax spent\nDEFAULT_RATIO = 10\n\ndef main() :\n '''Main'''\n \n if len(sys.argv) == 3 :\n bonusStars = int(sys.argv[1])\n cost = float(sys.argv[2])\n bonusStarsPerUnit = DEFAULT_BONUS_STARS_PER_UNIT\n salesTax = DEFAULT_SALES_TAX\n ratio = DEFAULT_RATIO\n elif len(sys.argv) == 6 :\n bonusStars = int(sys.argv[1])\n cost = float(sys.argv[2])\n bonusStarsPerUnit = int(sys.argv[3]) \n salesTax = float(sys.argv[4]) \n ratio = float(sys.argv[5])\n else :\n print (\"Incorrect argument count. Usage:\")\n print (\"\\tpython \" + sys.argv[0] + \" [ ]\")\n return\n\n calculatedRatio = (bonusStars + (bonusStarsPerUnit * cost)) / ((1 + salesTax) * cost)\n\n if calculatedRatio >= ratio :\n print (\"WORTH IT!\")\n print (\"Ratio: {:.2f} >= {:.2f}\".format(calculatedRatio, ratio))\n else :\n print (\"DON'T WASTE YOUR MONEY!\")\n print (\"Ratio: {:.2f} < {:.2f}\".format(calculatedRatio, ratio))\n\n return\n\nif __name__ == \"__main__\" :\n main()\n\n","repo_name":"N8Stewart/StarbucksBonusStarCalculator","sub_path":"starbucks.py","file_name":"starbucks.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15591904011","text":"import numpy as np\n\nfrom dzutils.pyrosetta_utils.geometry.parametric import ca_array\nfrom dzutils.pyrosetta_utils.geometry import (\n homog_from_four_points,\n np_homog_to_rosetta_rotation_translation,\n homog_from_3_CA,\n)\nfrom pyrosetta.rosetta.numeric import xyzVector_double_t as rosetta_vector\nfrom pyrosetta.rosetta.protocols.toolbox.pose_manipulation import (\n rigid_body_move,\n)\nfrom pyrosetta.rosetta.core.select.residue_selector import TrueResidueSelector\n\n\ndef align_frag_to_ideal_helix(frag, align_res, **params):\n \"\"\"\n Align and return frag by stub to ideal helix described by the params given\n \"\"\"\n r0 = params[\"r0\"]\n omega0 = params[\"omega0\"]\n omega1 = params[\"omega1\"]\n phi0 = params[\"phi0\"]\n phi1 = params[\"phi1\"]\n delta_z = params[\"delta_z\"]\n invert = params[\"invert\"]\n ideal_ca = ca_array(r0, omega0, omega1, phi0, phi1, delta_z, 3, invert)\n ca_homog = homog_from_four_points(ideal_ca[1], *ideal_ca)\n # targ_stub = Stub()\n # targ_stub.from_four_points(\n # *[frag.residue(num).xyz(\"CA\") for num in [res_2, res_1, res_2, res_3]]\n # )\n targ_homog = homog_from_3_CA(frag, align_res)\n super_xform = ca_homog @ np.linalg.inv(targ_homog)\n rotation, translation = np_homog_to_rosetta_rotation_translation(\n super_xform\n )\n rigid_body_move(\n rotation,\n translation,\n frag,\n TrueResidueSelector().apply(frag),\n rosetta_vector(0, 0, 0),\n )\n return frag\n\n\ndef overlay_poses_by_3_CA(mob_pose, targ_pose, mob_res, targ_res):\n \"\"\"\n Returns mob_pose res i - i+2 overlaid onto the respective res of targ_pose\n \"\"\"\n # \"Stub\" homog xform for the mobile pose\n mob_homog = homog_from_3_CA(mob_pose, mob_res)\n # \"Stub\" homog xform for the target pose\n targ_homog = homog_from_3_CA(targ_pose, targ_res)\n super_xform = targ_homog @ np.linalg.inv(mob_homog)\n rotation, translation = np_homog_to_rosetta_rotation_translation(\n super_xform\n )\n rigid_body_move(\n rotation,\n translation,\n mob_pose,\n TrueResidueSelector().apply(mob_pose),\n rosetta_vector(0, 0, 0),\n )\n\n\ndef main():\n \"\"\"\n \"\"\"\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"dmitropher/dzutils","sub_path":"dzutils/pyrosetta_utils/phos_binding/misc_scripts/grafting_helical_fragments.py","file_name":"grafting_helical_fragments.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71285977545","text":"import os, re\n\ndef filterString(fileName, fileType):\n if fileType == \"folder\":\n newName = list(re.sub(\"[\\[{)\\}(+\\-.\\]]\", \"\", fileName))\n newName = ''.join(newName)\n return re.sub(' +', ' ', newName).strip()\n else :\n newName = list(re.sub(\"[\\[{)\\}(+\\-.\\]]\", \"\", fileName[:-4]))\n newName = ''.join(newName + list(fileName)[-4:])\n return re.sub(' +', ' ', newName).strip()\n# break regex into filter helper2 w/ tests (to include whitespace filtration from line 7/11)\ndef renameUtility(path):\n files = os.listdir(path)\n renameFilesCount = 0\n for index, file in enumerate(files):\n if os.path.isdir(os.path.join(path, ''.join(file))):\n renameFilesCount += renameUtility(os.path.join(path, ''.join(file)))\n newName = filterString(file, \"folder\")\n rejoinedPathName = os.path.join(path, ''.join(newName))\n # os.rename(os.path.join(path, file), rejoinedPathName)\n else :\n newName = filterString(file, \"file\")\n if newName != file:\n renameFilesCount = renameFilesCount + 1\n # print(\"rename files count \" + str(renameFilesCount))\n os.rename(os.path.join(path, file), os.path.join(path, ''.join(newName)))\n\n return renameFilesCount\n\n# renameUtility(\"C:\\\\Users\\\\Timothy\\Desktop\\\\target folder\\\\subTargetFolder\")\n# build path parser to avoid having to add \\\\\n# line 25 is hitting, but renameFilesCount isn't being updated?","repo_name":"FlimothyCrow/Python","sub_path":"renameUtility.py","file_name":"renameUtility.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39684697840","text":"#接收端绑定端口\nfrom socket import *\n\nudpsocket=socket(AF_INET,SOCK_DGRAM)\n\nbindAddr=('',8080) #绑定本机端口\nudpsocket.bind(bindAddr)\n\n\nwhile True:\n recvData=udpsocket.recvfrom(1024)\n context,destAddr=recvData\n #print(recvData)\n print(context.decode(\"gb2312\"),destAddr) #接收后,decode解码\n\n\n","repo_name":"weilink025/python3x-learnning","sub_path":"python大佬炮制/网络编程/socket-编程/UDP/2-UDP socke接收端.py","file_name":"2-UDP socke接收端.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18919439217","text":"import random, datetime\n\nfrom app.models import Game, User, Rating, db\nfrom flask.ext.script import Command, Option\nimport rating.rating_math as rm\n\ndef sanitized_users(g_vec):\n \"\"\" Strip out users with no games from our list \"\"\"\n users = User.query.all() \n users_with_games = set([g[0] for g in g_vec])\n users_with_games = users_with_games | set([g[1] for g in g_vec])\n new_users = [u for u in users if u.aga_id and int(u.aga_id) in users_with_games] # aga_id is text?!?\n return new_users\n\ndef sanitized_games(games):\n \"\"\" Sanitizes a list of games into result tuples for rating.\n A result tuple is the form (w, b, result, date, handicap, komi)\n Where 'result' is 1 if w won and 0 otherwise.\n \"\"\"\n\n g_vec = []\n for g in games:\n if g.white.user_id is None or g.black.user_id is None:\n print('No ids : ', g) #should probably strip them from the db.\n pass\n elif g.handicap > 1 and (-1 > g.komi > 1):\n #print('bad komi : 
', g)\n pass\n elif g.handicap > 6:\n continue\n elif g.komi < 0 or g.komi > 8.6:\n #print('bad komi : ', g)\n pass\n elif not (g.result.startswith('W') or g.result.startswith('B')):\n print('unknown result: ', g)\n pass\n elif g.date_played is None or g.date_played.timestamp() == 0.0:\n print('No date played: ', g)\n pass\n else:\n # Vector of result_tuples. Just what we need to compute ratings...\n g_vec.append( (g.white.id, \n g.black.id,\n 1.0 if g.result.startswith('W') else 0.0,\n g.date_played.timestamp(),\n g.handicap,\n g.komi)) \n return g_vec \n\ndef rate_all(t_from=datetime.datetime.utcfromtimestamp(1.0), \n t_to=datetime.datetime.now(),\n iters=200, lam=.22):\n \"\"\"\n t_from -- datetime obj, rate all games after this \n t_to -- datetime obj, rate all games up to this\n iters -- number of iterations\n lam -- 'neighborhood pull' parameter. higher = more error from moving a rank away from ranks in its neighborhood\n \"\"\"\n games = Game.query.filter(Game.date_played < t_to, Game.date_played > t_from) \n g_vec = sanitized_games(games)\n users = sanitized_users(g_vec)\n\n print(\"found %d users with %d valid games\" % (len(users), len(g_vec)) )\n\n aga_ids_to_uids = dict([(int(u.aga_id), u.id) for u in users])\n\n ratings = {int(u.aga_id): u.last_rating() for u in users}\n rating_prior = {id: v.rating if (v and v.rating) else 20 for id,v in ratings.items()} \n print (\"%d users with no priors\" % len(list(filter(lambda v: v == 20, rating_prior.values()))))\n\n neighbors = rm.neighbors(g_vec)\n neighbor_avgs = rm.compute_avgs(g_vec, rating_prior) \n\n t_min = min([g[3] for g in g_vec])\n t_max = max([g[3] for g in g_vec])\n\n lrn = lambda i: ((1. + .1*iters)/(i + .1 * iters))**.3 #Control the learning rate over time.\n\n for i in range(iters):\n loss = 0\n # Accumulate the neighborhood loss prior to changing the ratings around\n for id, neighbor_wgt in neighbor_avgs.items():\n loss += lam * ((rating_prior[id] - neighbor_wgt) ** 2)\n\n # Shuffle the vector of result-tuples and step through them, accumulating error.\n random.shuffle(g_vec)\n for g in g_vec:\n w, b, actual, t, handi, komi = g\n odds = rm.expect(rating_prior[b], rating_prior[w], handi, komi)\n weight = rm.time_weight(t, t_min, t_max)\n rating_prior[w] -= lrn(i) * (weight*(odds - actual)*odds*(1-odds) + (lam/len(neighbors[w]) * (rating_prior[w] - neighbor_avgs[w])))\n rating_prior[b] -= lrn(i) * (-1.0 * weight*(odds - actual)*odds*(1-odds) + (lam/len(neighbors[b]) * (rating_prior[b] - neighbor_avgs[b])))\n loss += weight * ((odds - actual) ** 2)\n\n # Scale the ratings\n r_min = min(rating_prior.values())\n r_max = max(rating_prior.values()) \n if r_max != r_min:\n for k,v in rating_prior.items():\n rating_prior[k] = (rating_prior[k] - r_min) / (r_max - r_min) * 40.0\n\n #update neighborhood averages?\n neighbor_avgs = rm.compute_avgs(g_vec, rating_prior) \n if (i % 50 == 0):\n print('%d : %.4f' % (i, loss))\n\n # Update the ratings and show how we did.\n wins, losses = {}, {}\n for g in g_vec:\n wins[g[0]] = wins.get(g[0], 0) + g[2]\n losses[g[0]] = losses.get(g[0], 0) + 1-g[2]\n wins[g[1]] = wins.get(g[1], 0) + 1-g[2]\n losses[g[1]] = losses.get(g[1], 0) + g[2]\n\n for k in sorted(rating_prior, key=lambda k: rating_prior[k])[-10:]: \n print(\"%d (uid: %d): %f (%d - %d)\" % (k, aga_ids_to_uids[k], rating_prior[k], wins.get(k,0), losses.get(k,0)) )\n \n for k in sorted(rating_prior, key=lambda k: rating_prior[k]): \n db.session.add(Rating(user_id=aga_ids_to_uids[k], rating=rating_prior[k], created=t_to))\n 
db.session.commit()\n\n\nclass RatingsAtCommand(Command):\n \"\"\" Class that holds the state for computing a single ratings run at a given point in time\"\"\"\n option_list = (\n Option('--from', '-f', dest='t_from'),\n Option('--to', '-t', dest='t_to'),\n Option('--iterations', '-i', dest='iters', default=200),\n Option('--neighborhood', '-n', dest='neighborhood', default=0.15)\n )\n\n def run(self, t_from, t_to, iters, neighborhood):\n if t_to is None:\n t_to = datetime.datetime.now()\n else:\n try: \n t_to = datetime.datetime.utcfromtimestamp(float(t_to))\n except ValueError: \n t_to = datetime.datetime.strptime(t_to, \"%Y-%m-%d\")\n\n if t_from is None:\n t_from = datetime.datetime.utcfromtimestamp(1.0)\n else:\n try: \n t_from = datetime.datetime.utcfromtimestamp(float(t_from))\n except ValueError: \n t_from = datetime.datetime.strptime(t_from, \"%Y-%m-%d\")\n\n try:\n iters = int(iters)\n except ValueError:\n print(\"Iters should be an integer, defaulting to 200\")\n iters = 200\n\n try:\n neighborhood = float(neighborhood)\n except ValueError:\n print(\"Neighborhood should be an integer, defaulting to .15\")\n neighborhood = .15\n\n this_to = t_from + datetime.timedelta(365*2)\n while this_to < t_to:\n print(\"==\")\n print(\"Generating ratings of games played between %s and %s\" % (t_from, this_to)) \n print(\"%d iterations, neighborhood pull parameter %f\" % (iters, neighborhood))\n rate_all(t_from, this_to, iters, neighborhood)\n this_to += datetime.timedelta(30) \n #rate_all(t_from, t_to, iters, neighborhood)\n\n","repo_name":"usgo/online-ratings","sub_path":"web/scripts/rate_all.py","file_name":"rate_all.py","file_ext":"py","file_size_in_byte":6951,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"10482103576","text":"\"\"\"\nSchemas for \"companies\" related models\n\"\"\"\n\nfrom marshmallow import fields, validate\nfrom marshmallow_sqlalchemy import field_for\n\nfrom app import ma\nfrom app.base import constants as APP\nfrom app.base.schemas import default_exclude, BaseReadArgsSchema, user_fields\nfrom app.resources.companies.models import Company\nfrom app.resources.companies import constants as COMPANY\nfrom app.resources.accounts import constants as ACCOUNT\n\n\nclass ManagementProfileSchema(ma.Schema):\n \"\"\"\n Schema for loading \"management profile\" detail from request, and also\n formatting output\n \"\"\"\n contact_name = fields.String()\n contact_designation = fields.String()\n contact_email = fields.Email()\n\n\nclass CompanySchema(ma.ModelSchema):\n \"\"\"\n Schema for loading \"Company\" from requests, and also formatting output\n \"\"\"\n\n company_name = field_for(Company, 'company_name', validate=[\n validate.Length(min=1, error=APP.MSG_NON_EMPTY),\n validate.Length(max=COMPANY.COMPANY_NAME_MAX_LENGTH,\n error=APP.MSG_LENGTH_EXCEEDS)])\n account_type = field_for(Company, 'account_type', validate=validate.OneOf(\n ACCOUNT.ACCT_TYPES))\n\n management_profile = fields.List(fields.Nested(ManagementProfileSchema))\n\n class Meta:\n model = Company\n include_fk = True\n load_only = ('updated_by', 'created_by')\n dump_only = default_exclude + ('updated_by', 'created_by')\n\n links = ma.Hyperlinks({\n 'self': ma.URLFor('api.companyapi', row_id=''),\n 'collection': ma.URLFor('api.companylistapi')\n }, dump_only=True)\n\n creator = ma.Nested(\n 'app.resources.users.schemas.UserSchema', only=user_fields,\n dump_only=True)\n sector = ma.Nested(\n 'app.resources.sectors.schemas.SectorSchema', only=['row_id', 
'name'],\n dump_only=True)\n industry = ma.Nested(\n 'app.resources.industries.schemas.IndustrySchema',\n only=['row_id', 'name'], dump_only=True)\n\n\nclass CompanyReadArgsSchema(BaseReadArgsSchema):\n \"\"\"\n Schema for reading \"Company\" filters from request args\n \"\"\"\n\n company_name = fields.String(load_only=True)\n account_type = fields.String(load_only=True, validate=validate.OneOf(\n ACCOUNT.ACCT_TYPES))\n sector_id = fields.Integer(load_only=True)\n industry_id = fields.Integer(load_only=True)\n","repo_name":"Witzcode0/Exchange-connect","sub_path":"app/resources/companies/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33656418684","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.contrib import messages\nfrom .forms import SignUpForm,card_lookup,AddRecordForm,price_edit,image_upload,SearchForm\nfrom .models import Record,card_user_table,grader_names,team_names\nfrom .funcs import lookup_card,create_search_query,add_record_to_db,get_s3_url\nfrom django.contrib.auth.models import User \nfrom decimal import Decimal\nfrom django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage, PageNotAnInteger\nfrom django.core.files.uploadedfile import TemporaryUploadedFile\n\n# Create your views here.\nMESSAGE_TAGS = {\n messages.INFO: \"\"\n}\n\ndef image_uploader(request):\n if request.method == 'POST':\n form = image_upload(request.POST, request.FILES)\n print(form.files)\n if form.is_valid():\n uploaded_image = str(form.cleaned_data['image']).replace(' ','_')\n print(form.cleaned_data['image'])\n url = get_s3_url(f'images/{uploaded_image}')\n print(url)\n\n \n form.save()\n return render(request, 'image_upload.html', {'form': form,'img_url':url})\n else:\n messages.success(request=request, message=\"Failed to update image\")\n return render(request, 'image_upload.html', {'form': form})\n else:\n form = image_upload()\n return render(request, 'image_upload.html', {'form': form})\n\n\n\ndef home(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request=request,username=username,password=password)\n\n if user is not None:\n login(request=request,user=user)\n messages.success(request=request, message=\"You have been successfully logged in\")\n return redirect('home')\n else:\n messages.success(request=request, message=\"Failed to logged in\")\n return redirect('home')\n else:\n queryset = card_user_table.objects.filter(user_id=request.user.id).order_by('-average_price')\n if len(queryset)>0:\n items_per_page = 5\n paginator = Paginator(queryset, items_per_page)\n page = request.GET.get('page')\n try:\n items = paginator.page(page)\n except EmptyPage:\n items = paginator.page(paginator.num_pages)\n except PageNotAnInteger:\n items = paginator.page(1)\n return render(request, 'home.html',{'records':queryset,'page':page, 'items':items})\n else:\n return render(request, 'home.html')\n\ndef logout_user(request):\n logout(request=request)\n messages.success(request=request, message=\"You have been logged out\")\n return redirect('home')\n\ndef register_user(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data['username']\n password = form.cleaned_data['password1']\n user = 
authenticate(username=username,password=password)\n login(request=request,user=user)\n messages.success(request=request, message=\"You have successfully registered\")\n return redirect('home')\n else:\n form = SignUpForm()\n return render(request=request,template_name='register.html',context={'form':form})\n return render(request=request,template_name='register.html',context={'form':form})\n\n\ndef card_lookup_page(request):\n user_id = request.user.id\n if request.method == 'POST':\n img_form = image_upload(request.POST, request.FILES)\n if 'search' in request.POST: \n form = card_lookup(request.POST)\n \n if form.is_valid():\n search_query = create_search_query(form=form,user_id=user_id) \n request.session['lookup_data'] = search_query[1]\n print(search_query)\n results = lookup_card(search_query[0])\n print(results)\n if results is not None:\n bad_search = False\n price = results['average_price']\n print(price)\n initial ={\n 'price_field':price\n }\n price_form = price_edit(initial=initial)\n results['search_crit'] = results['search_crit'].replace('+',' ')\n else:\n results = None\n bad_search = True\n price_form = price_edit()\n # print(search_query[1])\n \n \n\n \n request.session['result_data'] = results\n \n return render(request=request, template_name='card_lookup.html',context={'form':form, 'results':results, 'search_query':search_query[0],'bad_search':bad_search,'price_form':price_form,'img_form':img_form})\n \n if 'add' in request.POST:\n print('adding..')\n \n price = Decimal(price_edit(request.POST).data.get('price_field'))\n search_data = request.session['lookup_data']\n result_data = request.session['result_data']\n print(len(img_form.files))\n if img_form.is_valid() and len(img_form.files)>0:\n if len(img_form.files['image']) > 0:\n uploaded_image = str(img_form.files['image']).replace(' ','_')\n img_url = get_s3_url(f'images/{uploaded_image}') \n request.session['result_data']['img']=img_url\n img_form.save()\n result_data['average_price'] = price\n form = card_lookup()\n \n add_record_to_db(search_data=request.session['lookup_data'],result_data=request.session['result_data'])\n messages.success(request=request, message=\"Card Added\")\n return render(request=request, template_name='card_lookup.html',context={'form':form})\n\n else:\n form = card_lookup()\n return render(request=request, template_name='card_lookup.html',context={'form':form})\n\n \n \n \n\n\ndef customer_record(request,pk):\n if request.user.is_authenticated:\n previous_url = request.META.get('HTTP_REFERER')\n customer_record = card_user_table.objects.get(card_id=pk)\n \n return render(request=request, template_name='record.html',context={'customer_record':customer_record, 'previous_url':previous_url})\n else:\n messages.success(request=request, message=\"You must be logged in\")\n return redirect('home')\n \ndef delete_card(request,pk):\n if request.user.is_authenticated:\n delete_it = card_user_table.objects.get(card_id=pk)\n delete_it.delete()\n messages.success(request=request, message=\"Record Deleted\")\n return redirect('home')\n else:\n messages.success(request=request, message=\"You must be logged in\")\n return redirect('home')\n\ndef add_record(request):\n form = AddRecordForm(request.POST or None)\n if request.user.is_authenticated:\n if request.method == 'POST':\n print(request.session['my_data'])\n if form.is_valid():\n add_record = form.save()\n messages.success(request=request, message=\"Record Added\")\n return redirect('home')\n\n return render(request=request, 
template_name='add_record.html',context={'form':form})\n else:\n messages.success(request=request, message=\"You must be logged in\")\n return redirect('home')\n\ndef update_record(request,pk):\n if request.user.is_authenticated:\n current_record = Record.objects.get(id=pk)\n form = AddRecordForm(request.POST or None, instance=current_record)\n if form.is_valid():\n form.save()\n messages.success(request=request, message=\"Record Updated\")\n return render(request=request, template_name='update_record.html',context={'form':form})\n else:\n messages.success(request=request, message=\"You must be logged in\")\n return redirect('home')\n \n\ndef search_view(request):\n if request.method == 'POST':\n form = SearchForm(request.POST)\n if form.is_valid():\n search_query = form.cleaned_data['search_query']\n # Perform the search based on the search_query\n # You can use the search_query to filter your data\n # and render the results in the template\n return render(request, 'results.html', {'search_query': search_query})\n else:\n form = SearchForm()\n \n return render(request, 'search.html', {'form': form})","repo_name":"samdandy/card_shum","sub_path":"venv/player/website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26533505904","text":"import requests, bs4, os\n\nurl='https://xkcd.com'\n\nos.makedirs('xkcd', exist_ok=True) # exist_ok=True: if the directory already exists, don't raise an exception\n\n# Repeats until it reaches the first comic\nfor page in range(0, 5):\n # Loads the XKCD home page\n xkcd_res=requests.get(url)\n xkcd_res.raise_for_status()\n\n # Saves the comic image on that page\n xkcd_soup=bs4.BeautifulSoup(xkcd_res.text, 'html.parser')\n # Follows the Previous Comic link\n \n xkcd_comic_img_src=xkcd_soup.select('#comic img')[0].get('src')\n\n xkcd_comic_img_res=requests.get(f'https:{xkcd_comic_img_src}')\n file_name=os.path.basename(f'https:{xkcd_comic_img_src}')\n xkcd_comic_img_res.raise_for_status()\n xkcd_comic_file=open(f'xkcd/{file_name}', 'wb')\n for chunk in xkcd_comic_img_res.iter_content(100000):\n xkcd_comic_file.write(chunk)\n xkcd_comic_file.close()\n\n xkcd_prev_href=xkcd_soup.select('a[rel=\"prev\"]')[0].get('href')\n url=f'https://xkcd.com{xkcd_prev_href}'\n # print(xkcd_prev_href) # /2835/","repo_name":"gooogyeong/tutorial-python","sub_path":"web-scraping/downloadXkcd.py","file_name":"downloadXkcd.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10453533082","text":"import cv2\r\nimport numpy as np\r\nimport os\r\n# from utils.metrics import Evaluator\r\n# from sklearn.metrics import jaccard_score, f1_score\r\n\r\n\r\n# Define the Args class\r\nclass Args:\r\n def __init__(self):\r\n self.dataset = '356a192b7913b04c54574d18c28d46e6395428ab' # Updated dataset path\r\n\r\n# Create an instance of Args\r\nargs = Args()\r\n\r\n# Define the base paths\r\ninput_base_path = r'E:\\OneDrive - mmu.edu.my\\Documents - ITP\\lzw\\tia_coanet\\out_imgs_1300' # Updated input base path\r\noutput_base_path = r'E:\\OneDrive - mmu.edu.my\\Documents - ITP\\lzw\\tia_coanet\\out_imgs_1300' # Updated output base path\r\n\r\ndataset = args.dataset\r\n\r\n# Folder containing all the images\r\nimage_folder = os.path.join(input_base_path, dataset, 'result6')\r\ngt_folder = os.path.join(input_base_path, dataset, 'result6')\r\n# gt_mask_folder = 
os.path.join(gt_folder, dataset, 'result6')\r\noutput_directory = os.path.join(output_base_path, dataset, 'OCV_result6')\r\n\r\n\r\n# iou_scores = []\r\n# dice_scores = []\r\n\r\nos.makedirs(output_directory, exist_ok=True)\r\n\r\n# Get a list of all image filenames\r\nimage_filenames = [filename for filename in os.listdir(image_folder) if filename.endswith(\"_sat.png\")]\r\n\r\n# Create an Evaluator for accuracy metrics\r\n# evaluator = Evaluator(num_class=2) # Assuming you have 2 classes, modify accordingly\r\n\r\nfor image_filename in image_filenames:\r\n # Construct paths to the original image, predicted mask image, and output directory\r\n original_image_path = os.path.join(image_folder, image_filename)\r\n pred_mask_image_path = os.path.join(image_folder, image_filename.replace(\"_sat.png\", \"_pred.png\"))\r\n output_image_path = os.path.join(output_directory, image_filename.replace(\"_sat.png\", \"_result.png\"))\r\n # gt_mask_image_path = os.path.join(gt_mask_folder, image_filename.replace(\"_sat.png\", \"_gt.png\"))\r\n\r\n\r\n # Load the original image\r\n original_image = cv2.imread(original_image_path)\r\n\r\n # Load the predicted mask image\r\n pred_mask_image = cv2.imread(pred_mask_image_path, cv2.IMREAD_GRAYSCALE)\r\n\r\n # Load the ground truth mask image\r\n # gt_mask_image = cv2.imread(gt_mask_image_path, cv2.IMREAD_GRAYSCALE)\r\n\r\n # Ensure the mask is binary (either 0 or 255)\r\n _, pred_mask_image = cv2.threshold(pred_mask_image, 1, 255, cv2.THRESH_BINARY)\r\n\r\n # Ensure the mask is binary (either 0 or 255)\r\n # _, gt_mask_image = cv2.threshold(gt_mask_image, 1, 255, cv2.THRESH_BINARY)\r\n\r\n # Apply the mask to the original image\r\n result_image = cv2.bitwise_and(original_image, original_image, mask=pred_mask_image)\r\n\r\n # Save the result image to the output directory\r\n cv2.imwrite(output_image_path, result_image)\r\n\r\n # Convert the result image to grayscale\r\n result_image_gray = cv2.cvtColor(result_image, cv2.COLOR_BGR2GRAY)\r\n\r\n # Add the predicted mask and the result image to the evaluator\r\n # evaluator.add_batch(pred_mask_image, result_image_gray)\r\n\r\n # # Calculate the IoU and Dice Coefficient\r\n # iou = jaccard_score(gt_mask_image.flatten(), pred_mask_image.flatten())\r\n # dice = f1_score(gt_mask_image.flatten(), pred_mask_image.flatten())\r\n\r\n # iou_scores.append(iou)\r\n # dice_scores.append(dice)\r\n\r\n # # Calculate pixel accuracy\r\n # correct_pixels = np.sum((gt_mask_image == pred_mask_image) & (gt_mask_image > 0))\r\n # total_pixels = np.sum(gt_mask_image > 0)\r\n # pixel_accuracy = correct_pixels / total_pixels\r\n # pixel_accuracy_scores.append(pixel_accuracy)\r\n\r\n\r\n# # Calculate the mean IoU, Dice Coefficient, and Pixel Accuracy\r\n# mean_iou = np.mean(iou_scores)\r\n# mean_dice = np.mean(dice_scores)\r\n# mean_pixel_accuracy = np.mean(pixel_accuracy_scores)\r\n\r\n# print(f\"Mean IoU: {mean_iou}\")\r\n# print(f\"Mean Dice Coefficient: {mean_dice}\")\r\n# print(f\"Mean Pixel Accuracy: {mean_pixel_accuracy * 100:.2f}%\")\r\n\r\n# Calculate accuracy metrics\r\n# Acc = evaluator.Pixel_Accuracy()\r\n# Acc_class = evaluator.Pixel_Accuracy_Class()\r\n# mIoU = evaluator.Mean_Intersection_over_Union()\r\n# IoU = evaluator.Intersection_over_Union()\r\n# Precision = evaluator.Pixel_Precision()\r\n# Recall = evaluator.Pixel_Recall()\r\n# F1 = evaluator.Pixel_F1()\r\n\r\n# # Print or use the metrics as needed\r\n# print(\"Pixel Accuracy:\", Acc)\r\n# print(\"Pixel Accuracy Class:\", Acc_class)\r\n# print(\"Mean IoU:\", 
mIoU)\r\n# print(\"IoU:\", IoU)\r\n# print(\"Precision:\", Precision)\r\n# print(\"Recall:\", Recall)\r\n# print(\"F1 Score:\", F1)\r\n\r\n","repo_name":"ZWEILIM/Semantic_Segmentation","sub_path":"opencvmask.py","file_name":"opencvmask.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28562371158","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 4 16:10:00 2019\n\n@author: kjh1\n\"\"\"\n\nN = int(input())\n\nnumbers = list(input().split())\nnumbers = list(map(int,numbers))\nnumbers.insert(0,0)\nQ = int(input())\n\ndp_arr = [0]*Q\n\nfor i in range(Q):\n se = list(input().split())\n se = list(map(int,se))\n \n if(Q>1):\n dp_arr[i] = numbers[se[0]:se[1]+1]\n else:\n dp_arr[i] = [0]\n \n\n\ndef isP(i):\n \n \n if(len(dp_arr[i]) == 1):\n print(1) \n else:\n if(len(dp_arr[i]) == 2):\n if(dp_arr[i][0] == dp_arr[i][1]):\n print(1)\n else:\n print(0)\n elif(dp_arr[i][0] == dp_arr[i][-1]):\n del dp_arr[i][0]\n del dp_arr[i][-1]\n isP(i)\n \n else:\n print(0)\n \n\nfor i in range(Q):\n isP(i)","repo_name":"kjh000/Algorithm","sub_path":"algo/백준/10942_2.py","file_name":"10942_2.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28074001659","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport os\nfrom shutil import copyfile\nimport subprocess as sp\n\nCURR_DIR = os.getcwd()\nPKG_DIR = os.path.join(CURR_DIR, \".package\")\nCALC_DIR = os.path.join(PKG_DIR, \"CalcPlusPlus\")\nLIB_DIR = os.path.join(CALC_DIR, \"libs\")\nPLATFORM_DIR = os.path.join(CALC_DIR, \"platforms\")\n\n\ndef main(args):\n # if os.path.isdir(PKG_DIR):\n # rmtree(PKG_DIR)\n\n def mkdir(dir):\n if not os.path.isdir(dir):\n os.mkdir(dir)\n\n mkdir(PKG_DIR)\n mkdir(CALC_DIR)\n mkdir(LIB_DIR)\n mkdir(PLATFORM_DIR)\n mkdir(os.path.join(CALC_DIR, \"bin\"))\n\n calcpp = os.path.join(CURR_DIR, \"UI\", \"calcpp\")\n assert os.path.isfile(calcpp)\n\n copyfile(calcpp, os.path.join(CALC_DIR, \"bin\", \"calcpp\"))\n copyfile(\n os.path.join(CURR_DIR, \".utils\", \".templates\", \"Calculator.sh\"),\n os.path.join(CALC_DIR, f\"{args.exec}\"),\n )\n\n sp.call(f\"chmod +x {os.path.join(CALC_DIR, 'bin', 'calcpp')}\", shell=True)\n sp.call(f\"chmod +x {os.path.join(CALC_DIR, args.exec)}\", shell=True)\n\n # proc = sp.Popen(\"qtchooser -print-env\", shell=True, stdout=sp.PIPE)\n # stdout, stderr = proc.communicate()\n # QT_DIR = stdout.decode(\"utf-8\").split(os.linesep)[1].split('\"')[1]\n\n proc = sp.Popen(f\"ldd {calcpp}\", shell=True, stdout=sp.PIPE)\n stdout, stderr = proc.communicate()\n depends = stdout.decode(\"utf-8\").split(os.linesep)\n for dep in depends:\n dep = dep.split(\"=>\")[-1].strip().split(\" \")[0]\n if dep.startswith(\"/usr\"):\n copyfile(dep, os.path.join(LIB_DIR, dep.split(\"/\")[-1]))\n\n depends = [\n (dep, dep.split(\"/\")[-1])\n for dep in (\n \"/lib/x86_64-linux-gnu/libz.so.1\",\n \"/usr/lib/x86_64-linux-gnu/libQt5DBus.so.5\",\n \"/usr/lib/x86_64-linux-gnu/libQt5XcbQpa.so.5\",\n \"/usr/lib/x86_64-linux-gnu/libxcb-xinerama.so.0\",\n )\n ] + [\n (\"MathEngine/libMathEngine.so\", \"libMathEngine.so\"),\n (\".includes/gsl-2.6/cblas/.libs/libgslcblas.so.0.0.0\", \"libgslcblas.so.0\"),\n (\".includes/gsl-2.6/.libs/libgsl.so.25.0.0\", \"libgsl.so.25\"),\n ]\n for dep, name in depends:\n dest = os.path.join(LIB_DIR, name)\n # if not os.path.isfile(dest):\n copyfile(dep, dest)\n\n platform_dir = 
\"/usr/lib/x86_64-linux-gnu/qt5/plugins/platforms\"\n for file in os.listdir(platform_dir):\n src = os.path.join(platform_dir, file)\n dest = os.path.join(PLATFORM_DIR, file)\n if not os.path.isfile(dest):\n copyfile(src, dest)\n\n if not args.no_tar:\n os.chdir(PKG_DIR)\n sp.call(f\"tar -czvf {PKG_DIR}/calcplusplus.tar.gz CalcPlusPlus/\", shell=True)\n\n # os.chdir(CURR_DIR)\n # copyfile(\n # os.path.join(PKG_DIR, \"calcplusplus.tar.gz\"),\n # \"/media/antonio/HDD-1/VirtualBox/Ubuntu1604/Shared/calcplusplus.tar.gz\",\n # )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--exec\", default=\"Calculator\", help=\"Name of Executable\")\n parser.add_argument(\n \"--no-tar\", action=\"store_true\", help=\"If given, don't generate tar\"\n )\n main(parser.parse_args())\n","repo_name":"antoniojkim/CalcPlusPlus","sub_path":"scripts/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40257033173","text":"import cv2\n\nupperbody_cascade_path = 'haarcascade_upperbody.xml'\nface_cascade_path = 'haarcascade_frontalface_default.xml'\neye_cascade_path = 'haarcascade_eye.xml'\n\nupperbody_cascade = cv2.CascadeClassifier(upperbody_cascade_path)\nface_cascade = cv2.CascadeClassifier(face_cascade_path)\neye_cascade = cv2.CascadeClassifier(eye_cascade_path)\n\nsrc = cv2.imread(\"../../../examples/media/COCO_val2014_000000000459.jpg\")\nsrc_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n\nbodys = upperbody_cascade.detectMultiScale(src_gray)\nfor x, y, w, h in bodys:\n cv2.rectangle(src, (x, y), (x + w, y + h), (0, 0, 255), 2)\n body = src[y: y + h, x: x + w]\n body_gray = src_gray[y: y + h, x: x + w]\n\n faces = face_cascade.detectMultiScale(body_gray)\n for x, y, w, h in faces:\n cv2.rectangle(body, (x, y), (x + w, y + h), (255, 0, 0), 2)\n face = src[y: y + h, x: x + w]\n face_gray = src_gray[y: y + h, x: x + w]\n\n eyes = eye_cascade.detectMultiScale(face_gray)\n for (ex, ey, ew, eh) in eyes:\n cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)\n\n\n\n\ncv2.imshow('data/dst/opencv_face_detect_rectangle.jpg', src)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"mmk-uk/OpenPose_Python","sub_path":"face_recog.py","file_name":"face_recog.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73840451464","text":"# -*- coding:utf-8 -*-\n__author__ = 'yangjian'\n\"\"\"\n\n\"\"\"\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split\n\nfrom deeptables.models import deeptable\nfrom deeptables.datasets import dsutils\n\n\nclass Test_DeepTable_Multiclass:\n def setup_class(self):\n print(\"Loading datasets...\")\n data = dsutils.load_glass_uci()\n self.y = data.pop(10).values\n self.X = data\n\n conf = deeptable.ModelConfig(metrics=['AUC'], apply_gbm_features=False, )\n self.dt = deeptable.DeepTable(config=conf)\n self.X_train, \\\n self.X_test, \\\n self.y_train, \\\n self.y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=42)\n self.model, self.history = self.dt.fit(self.X_train, self.y_train, epochs=1)\n\n def teardown_class(self):\n print(\"Class teardown.\")\n\n def test_class_weights(self):\n conf = deeptable.ModelConfig(metrics=['AUC'], apply_gbm_features=False, apply_class_weight=True)\n dt = deeptable.DeepTable(config=conf)\n model, history = 
dt.fit(self.X_train, self.y_train, epochs=1)\n assert history.history['AUC'][0] > 0\n\n def test_evaluate(self):\n result = self.dt.evaluate(self.X_test, self.y_test)\n assert result['AUC'] > 0\n\n def test_predict(self):\n preds = self.dt.predict(self.X_test)\n assert len(preds.shape) == 1\n\n def test_predict_proba(self):\n proba = self.dt.predict_proba(self.X_test)\n # auc = roc_auc_score(self.y_test, proba, multi_class='ovo') # ovr\n assert proba.shape[1] == 6\n # assert auc > 0\n\n def test_proba2predict(self):\n proba = self.dt.predict_proba(self.X_test)\n preds = self.dt.predict(self.X_test)\n preds2 = self.dt.proba2predict(proba)\n # auc = roc_auc_score(self.y_test, proba, multi_class='ovo') # ovr\n\n assert proba.shape[1] == 6\n assert (preds == preds2).sum(), 43\n assert preds2.shape, (43,)\n # assert auc > 0\n","repo_name":"DataCanvasIO/DeepTables","sub_path":"deeptables/tests/models/deeptable_multiclass_test.py","file_name":"deeptable_multiclass_test.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":607,"dataset":"github-code","pt":"81"}
+{"seq_id":"37799271635","text":"import sys\ninput = sys.stdin.readline\n\nN, H_ATK = map(int, input().split())\ncase = [list(map(int, input().split())) for _ in range(N)]\n\ns = 1\ne = 999999000001\n\nwhile s < e:\n mid = (s+e) // 2\n MAX_HP = mid\n CUR_HP = MAX_HP\n CUR_ATK = H_ATK\n for i in range(N):\n t, a, h = case[i]\n if t == 1:\n CUR_HP -= a * (h//CUR_ATK)\n else:\n CUR_ATK += a\n CUR_HP = min(MAX_HP, CUR_HP + h)\n if CUR_HP < 1:\n break\n if CUR_HP < 1:\n s = mid + 1\n else:\n e = mid - 1\n\nprint(mid + 1)\n\n","repo_name":"BTDnoBacon/algorithm","sub_path":"baekjoon/gold4/#16434/16434.py","file_name":"16434.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"8889112558","text":"# Development time: 2022/5/11 10:51\n# File name: github_main.py\n# Note: full crawl, apiCode 10030007\nfrom datetime import datetime\nfrom urllib.parse import urlparse\n\nimport dateparser\nimport scrapy\nfrom loguru import logger\n\nfrom GitHubAll.settings import id_key, day_crawl_key, redis_conn\nfrom config.item_config import item_main\nfrom tools.common_tools import json_path, md5\nfrom tools.proxies import queue_empty\n\n\nclass GithubMainSpider(scrapy.Spider):\n name = 'github_main'\n allowed_domains = ['github.com', 'api.github.com']\n # crawl API endpoint (example)\n url_interfaces = [\"https://api.github.com/repositories/70318556\"]\n\n headers = {\n 'authority': 'api.github.com',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'accept-language': 'zh-CN,zh;q=0.9',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'\n }\n\n def start_requests(self):\n while True:\n if redis_conn.llen(id_key) <= 100 or redis_conn.llen(day_crawl_key) > 50000:\n logger.info(f\"Parsing finished for today! gb_all:day_crawl length: {redis_conn.llen(day_crawl_key)}\")\n break\n pipeline = redis_conn.pipeline()\n for _ in range(10000):\n pipeline.rpop(id_key)\n id_list = pipeline.execute()\n\n for _ in id_list:\n if _:\n try:\n url = f\"https://api.github.com/repositories/{_}\"\n yield scrapy.Request(url=url,\n headers=self.headers,\n callback=self.json_parse\n , meta={\n \"proxy\": queue_empty()\n }\n )\n except(Exception,):\n continue\n\n def json_parse(self, response):\n \"\"\"\n Get project information from the JSON API\n :param response:\n :return:\n \"\"\"\n logger.info(f\"Parsing API page: {response.url}\")\n try:\n item = item_main().copy()\n # full name of the fork's parent project\n item[\"fork_original_project\"] = json_path(response.json(), '$.parent.full_name')\n # full name of the fork's root (source) project\n item[\"fork_root_project\"] = json_path(response.json(), '$.source.full_name')\n # project name\n item[\"project_name\"] = json_path(response.json(), '$.name')\n # creation time\n create_time = json_path(response.json(), '$.created_at')\n if create_time:\n item[\"create_time\"] = dateparser.parse(create_time).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n logger.error(f\"This item has no publish time! | API url: {response.url}\")\n # project author\n author = json_path(response.json(), '$.owner.login')\n item[\"author\"] = author\n item[\"user_id\"] = author\n # project tags\n tags = json_path(response.json(), '$.topics')\n item[\"tags\"] = \"#\".join(tags)\n # star count\n stars_count = json_path(response.json(), '$.stargazers_count')\n item[\"stars_count\"] = str(stars_count)\n # watcher count\n watch_count = json_path(response.json(), '$.subscribers_count')\n item[\"watch_count\"] = str(watch_count)\n # fork count\n forks_count = json_path(response.json(), '$.forks_count')\n item[\"forks_count\"] = str(forks_count)\n # project description\n item[\"abstract\"] = json_path(response.json(), '$.description')\n # project id\n project_id = json_path(response.json(), '$.id')\n # project url\n source_url = json_path(response.json(), '$.html_url')\n\n yield scrapy.Request(url=source_url,\n headers=self.headers,\n callback=self.parse,\n meta={\n \"item\": item,\n \"project_id\": project_id\n , \"proxy\": queue_empty()\n })\n\n except(Exception,) as e:\n logger.error(f\"API page parse error! | {response.url} | {str(e)}\")\n\n def parse(self, response, **kwargs):\n \"\"\"\n Get project information from the HTML page\n :param response:\n :return:\n \"\"\"\n logger.info(f\"Parsing detail page: {response.url}\")\n item = response.meta[\"item\"]\n project_id = response.meta[\"project_id\"]\n try:\n # commit count\n item[\"commit_count\"] = response.xpath(\"//span[@class='d-none d-sm-inline']/strong/text()\").get('').replace(\n \",\", \"\")\n # contributor count\n contributors_count = response.xpath(\"//a[contains(text(), 'Contributors')]/span/text()\").get('').replace(\n \",\", \"\")\n item[\"contributors_count\"] = contributors_count\n # project url\n item[\"source_url\"] = response.url\n # redirect url\n ref_url = f\"https://api.github.com/repositories/{project_id}\"\n item[\"ref_url\"] = ref_url\n # MD5 of ref_url\n item[\"uuid\"] = md5(ref_url)\n # readme content\n item[\"readme\"] = response.xpath(\"//div[@data-target='readme-toc.content']\").xpath('string(.)').get(\"\")\n # item[\"readme_html\"] = response.xpath(json_path(config, \"$.item_info.readme\")).get(\"\")\n # basic information\n item[\"website_name\"] = 'GitHub'\n item[\"website_sub_name\"] = 'GitHub'\n item[\"host\"] = 'github.com'\n netloc = urlparse(response.url).netloc.replace('www.', '')\n if netloc:\n item[\"sub_host\"] = netloc\n else:\n item[\"sub_host\"] = item[\"host\"]\n item['crawler_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n item['insert_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n item['update_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n if item[\"project_name\"]:\n yield item\n else:\n logger.error(f\"No valid information extracted! Please check the page | {response.url}\")\n except(Exception,) as e:\n logger.error(f\"Detail page parse error! 
| {response.url} | {str(e)}\")\n","repo_name":"liulijun-king/liu_news","sub_path":"GitHubAll/GitHubAll/spiders/github_main.py","file_name":"github_main.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75148632265","text":"from urllib.request import urlopen\nfrom socket import timeout\nfrom json import dumps, loads\n\nAPI_KEY = '47a04b83'\nSTART_WORD = 'Batman'\nMAX_PAGE = 5\n\nsearch_dict = {}\nresult_dict = {}\n\ntotal_searches = 0\n\nclass SearchResultItem:\n def __init__(self, title, id):\n self.title = title\n self.id = id\n\nclass SearchResult:\n def __init__(self, total, items):\n self.total = total\n self.items = items\n\ndef search(word, page):\n global total_searches\n if total_searches > 99: return None\n try:\n contents = loads(urlopen('http://www.omdbapi.com/?s={}&page={}&apikey={}&type=movie'.format(word, str(page), API_KEY), timeout = 1).read())\n except:\n total_searches += 1\n return None\n total_searches += 1\n if contents['Response'] != 'True': return None\n else:\n items = []\n for item in contents['Search']:\n sri = SearchResultItem(item['Title'], item['imdbID'])\n items.append(sri)\n return SearchResult(int(contents['totalResults']), items)\n\ndef deplete_word(word):\n result = search(word, 1)\n movies = []\n if result != None:\n movies += result.items\n total = result.total\n if total >= 20:\n for i in range(2, min(int(total / 10), MAX_PAGE) + 1):\n result = search(word, i)\n if result != None:\n movies += result.items\n return movies\n\ndef process_word_recursively(word, f, count = 1):\n if count > 10: return True\n if word in search_dict: return False\n\n search_dict[word] = True\n movies = deplete_word(word)\n\n for m in movies:\n id = m.id\n if id not in result_dict:\n f.write('\"' + id + '\",\\n')\n result_dict[id] = True\n\n for m in movies:\n ts = m.title.split()\n print(ts)\n if len(ts) > 1 and process_word_recursively(ts[1], f, count + 1):\n break\n\n return False\n\n\nwith open('movies', 'w') as f:\n f.write('[')\n process_word_recursively(START_WORD, f)\n f.write(']')\n","repo_name":"ishmum123/omdb-scraper","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31077486010","text":"from math import cos, asin, sqrt,exp, log\nimport random\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport os\n\nlat_coeff=0.001\ndelta = 0.001\n\nSEED = 0\nGHz = 1000000000\nMbit = 1000000\nSECURITY_VNFS = 13 #like listed in the paper (table in the evaluation)\nMAX_CHAINS = 5\nMAX_VNFS_PER_CHAIN = 3\nMIN_LATENCY = 0.060 # seconds\nMAX_LATENCY = 0.400 # seconds\nMIN_CPU = 5\nMAX_CPU = 32\nMIN_PACKET_SIZE = 64*8 # minimum packet size in bits\nMAX_PACKET_SIZE = 1500*8 # maximum packet size in bits\nINTERNET_LATENCY = 0.010 #latency from the network border to Internet (e.g. 
online gaming server) in seconds\nMIN_BANDWIDTH = 100*1000 # bits per second\nMAX_BANDWIDTH = 5*Mbit # bits per second\nCPU_UNITS_PER_LINK = 1*32*2100*1000*1000 # servers * cores * frequency (cycles per second)\n\nQUEUING_DELAY_BOUND = 0.00008 # maximum queuing delay that can occur at a switch port (80us)\nSWITCH_QUEUING_DELAY = round(QUEUING_DELAY_BOUND*2,6) # queuing delay experienced by a packet when traversing a switch (2 ports)\n\ndef distance(lat1, lon1, lat2, lon2):\n p = 0.017453292519943295 #Pi/180\n a = 0.5 - cos((lat2 - lat1) * p)/2 + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2\n return 12742 * asin(sqrt(a)) #2*R*asin...\n\nrandom.seed(SEED)\ndef random_exponential(lambd):\n return -log(1.0 - random.random())/lambd\n\ndef sigmoid(x):\n return 1/(1+exp(-1000*(x-1)))\n\n\ndef link_latency(distance):\n propagation_delay = (float(distance)/299792)*1.5 #(km/(km/s))*(refraction index) -> latency in seconds\n #queuing_delay = 2*DATACENTER_TOTAL_QUEUING_DELAY # the packet exits one datacenter and enters the next one\n return round(propagation_delay,6)\n\ndef get_node_coordinates(nodes,label):\n for node in nodes:\n if node.label == label:\n return node.x, node.y\n\ndef get_node_index(nodes,label):\n for node in nodes:\n if nodes[node]['label'] == label:\n return node\n\n# computation of the processing delay as: L(c,i,u) = coeff1(i)*B(c) + coeff2(i)*Gu(u) + coeff3(i)*Gi(i) + Gu(u)/Gir(i)\ndef vnf_processing_delay(pn, c, i, u):\n B = c[0]['bandwidth'] # bandwidth of the chain (bits/sec)\n S = c[0]['packet_size'] # maximum packet size of the chain (bits/pkt)\n Gu = float(c[0][u]['cpu']) # CPU requirements of u (cycles/bit)\n Gi = float(pn.nodes[i]['cpu']) # CPU of node i\n Gir = float(pn.nodes[i]['residual_cpu']) # residual CPU of node i\n if Gu == 0:\n return 0\n\n #print \"old_chains_overhead: \", old_chains_overhead\n\n L = (Gu*S) / ((Gir - B*Gu) + delta) # in sec/pkt\n #print \"latency: \", L\n return L\n\ndef draw_graph(nodes, edges, graph_layout='shell',\n node_size=300, node_color='gray', node_alpha=0.3,\n node_text_size=8,\n edge_color='blue', edge_alpha=0.3, edge_tickness=1,\n text_font='sans-serif'):\n\n # create networkx graph\n G=nx.Graph()\n # add edges with (bandwidth,latency) weights\n # we exclude fake edges\n for edge in edges:\n if edge[0] != edge[1]:\n G.add_edge(edges[edge]['label'][0], edges[edge]['label'][1],weight=(edges[edge]['bandwidth'],edges[edge]['latency']))\n\n # assigning the weights to the nodes: CPU core and RAM megabytes\n for node in nodes:\n #G.node[node.label]['weight'] = node.capacity\n G.nodes[nodes[node]['label']]['label'] = nodes[node]['label']\n G.nodes[nodes[node]['label']]['pos'] = (nodes[node]['lon'],nodes[node]['lat'])\n\n # these are different layouts for the network you may try\n # shell seems to work best\n if graph_layout == 'spring':\n graph_pos=nx.spring_layout(G)\n elif graph_layout == 'spectral':\n graph_pos=nx.spectral_layout(G)\n elif graph_layout == 'random':\n graph_pos=nx.random_layout(G)\n else:\n graph_pos=nx.shell_layout(G)\n\n # draw graph\n node_labels = nx.get_node_attributes(G, 'label')\n node_pos = nx.get_node_attributes(G,'pos')\n edge_labels = nx.get_edge_attributes(G, 'weight')\n nx.draw_networkx_nodes(G,pos=node_pos,node_size=node_size,\n alpha=node_alpha, node_color=node_color)\n nx.draw_networkx_edges(G,node_pos,width=edge_tickness,\n alpha=edge_alpha,edge_color=edge_color)\n nx.draw_networkx_labels(G, node_pos,font_size=node_text_size,\n font_family=text_font,labels=node_labels)\n 
nx.draw_networkx_edge_labels(G, node_pos, edge_labels=edge_labels,font_size=node_text_size,\n label_pos=0.5)\n\n\n # show graph\n plt.show(block=True)\n\n\n\ndef save_result(filename,service_id,nodes,edges, chains, vnfs, embedding_cost, cpu_cost, bandwidth_cost, consumed_cpu, consumed_bandwidth,consumed_region_cpu, average_latency, nr_services, exec_time, solution_string):\n if os.path.isdir('./log') is False:\n os.mkdir(\"log\")\n\n file = open(\"log/\" + filename+\".log\", \"a\")\n file.write(str(service_id) + \" \" + str(nodes) + \" \" + str(edges) + \" \" + str(chains) + \" \" + str(vnfs) + \" \" + str(embedding_cost) + \" \" + str(cpu_cost) + \" \" + str(bandwidth_cost) + \" \" \\\n + str(consumed_cpu) + \" \" + str(consumed_bandwidth) + \" \" + str(consumed_region_cpu) + \" \" + str(average_latency) + \" \" + str(nr_services) + \" \" + str(exec_time) + \" \" + str(solution_string) + '\\n')\n file.close()\n\n","repo_name":"doriguzzi/pess-security","sub_path":"util_functions.py","file_name":"util_functions.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"13431193610","text":"import schedule\nfrom tradingAlgos import TradingAlgos\n\n\ntradingAlgos = TradingAlgos(\"AMZN\")\n\nif __name__ == '__main__':\n # This code won't run if this file is imported.\n # schedule to run this function every 5 seconds\n\n schedule.every(1.5).seconds.do(tradingAlgos.tripleThreatTrader)\n\n\n\n while True:\n # Run any pending jobs\n schedule.run_pending()\n\n","repo_name":"SohamGupta21/SSRTrade","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7830990218","text":"# -*- coding: utf-8 -*-\n# Modal dialog settings/configure window for the bigger project\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import simpledialog\n\nclass AutoScrollbar(ttk.Scrollbar):\n \"\"\" A scrollbar that hides itself if it's not needed.\n Works only if you use the grid geometry manager \"\"\"\n def set(self, lo, hi):\n if float(lo) <= 0.0 and float(hi) >= 1.0:\n self.grid_remove()\n else:\n self.grid()\n ttk.Scrollbar.set(self, lo, hi)\n\n def pack(self, **kw):\n raise tk.TclError('Cannot use pack with this widget')\n\n def place(self, **kw):\n raise tk.TclError('Cannot use place with this widget')\n\nclass MainGUI(ttk.Frame):\n \"\"\" Main GUI window \"\"\"\n def __init__(self, master):\n \"\"\" Init main window \"\"\"\n ttk.Frame.__init__(self, master=master)\n self.master.title('Main GUI')\n self.master.geometry('300x200')\n self.size = (320, 240) # size of the frame in pixels\n self.list = ['one', 'two', 'three', 'four', 'five', 'very long interesting class name', 'six',\n 'seven', 'eight', 'nine', 'ten', 'eleven', 'ok?']\n #self.state = 'disabled'\n self.state = 'normal'\n b = ttk.Button(self.master, text='Settings', command=self.open_settings)\n b.pack()\n b.focus_set()\n\n def open_settings(self):\n \"\"\" Open settings modal window \"\"\"\n s = Settings(self) # create settings object\n self.master.wait_window(s) # display the settings window and wait for it to close\n\nclass Settings(simpledialog.Dialog):\n \"\"\" Settings / configure window for bigger project \"\"\"\n def __init__(self, parent):\n \"\"\" Init settings window \"\"\"\n tk.Toplevel.__init__(self, master=parent)\n self.create_settings_window()\n self.create_widgets()\n\n def 
create_settings_window(self):\n \"\"\" Create settings window \"\"\"\n self.focus_set() # set focus on the settings window\n self.grab_set() # make a modal window, so all events go to settings window\n self.transient(self.master) # show only one window in the task bar\n #\n self.title('Settings') # set title\n # self.cancel gets fired when the window is destroyed\n self.protocol('WM_DELETE_WINDOW', self.cancel)\n # Set proper settings position over the parent window\n self.geometry('+{x}+{y}'.format(x = self.master.winfo_rootx() + 50,\n y = self.master.winfo_rooty() + 50))\n self.bind(\"<Escape>\", self.cancel) # close when <Escape> key is pressed\n\n def create_widgets(self):\n \"\"\" Widgets for settings window are created here \"\"\"\n w = 12 # width of the buttons and entry\n vcmd_size = (self.register(self.validate_size),\n '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W') # size validator\n vcmd_classname = (self.register(self.validate_classname),\n '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W') # classname validator\n #\n self.rowconfigure(0, weight=1) # make top frame extendable\n self.columnconfigure(0, weight=1)\n top = ttk.Frame(self) # upper frame with settings\n top.grid(row=0, column=0, sticky='nswe')\n top.rowconfigure(2, weight=1) # make ListBox extendable\n top.columnconfigure(1, weight=1)\n #\n # Frame size widget\n self.entry1 = tk.IntVar() # bind the entry widget to the IntVar\n self.entry2 = tk.IntVar() # bind the entry widget to the IntVar\n ttk.Label(top, text='Frame size: ').grid(row=0, column=0, sticky='w', pady=5)\n sizebox = ttk.Frame(top) # frame for roi width and height\n sizebox.grid(row=0, column=1, sticky='w', columnspan=3, pady=5)\n self.e1 = ttk.Entry(sizebox, state=self.master.state, textvariable=self.entry1,\n validate='key', validatecommand=vcmd_size) # width\n self.e1.pack(side='left')\n ttk.Label(sizebox, text='x').pack(side='left')\n self.e2 = ttk.Entry(sizebox, state=self.master.state, textvariable=self.entry2,\n validate='key', validatecommand=vcmd_size) # height\n self.e2.pack(side='left')\n #\n # New classname widget\n ttk.Label(top, text='New class name: ').grid(row=1, column=0, sticky='w')\n self.entry3 = tk.StringVar() # bind the entry widget to the StringVar\n e3 = ttk.Entry(top, width=w, textvariable=self.entry3,\n validate='key', validatecommand=vcmd_classname)\n e3.grid(row=1, column=1, columnspan=2, sticky='we')\n e3.focus_set() # set focus on this entry widget\n e3.bind('<Return>', self.add) # add classname when press Enter key\n #\n # ListBox widget\n vbar = AutoScrollbar(top, orient='vertical') # vertical and horizontal scrollbars\n hbar = AutoScrollbar(top, orient='horizontal')\n vbar.grid(row=2, column=2, sticky='ns')\n hbar.grid(row=3, column=0, sticky='we', columnspan=2)\n self.listbox = tk.Listbox(top, xscrollcommand=hbar.set, yscrollcommand=vbar.set,\n selectmode='browse') # browse == use only 1 selection at a time\n self.listbox.grid(row=2, column=0, sticky='nswe', columnspan=2, pady=5)\n self.listbox.bind('<<ListboxSelect>>', self.on_select) # selection event\n self.listbox.bind('<Delete>', self.remove) # deletion event\n self.listbox.bind('<FocusIn>', self.on_focus) # focus-in event\n hbar.configure(command=self.listbox.xview) # bind scrollbars with ListBox\n vbar.configure(command=self.listbox.yview)\n #\n # Insert data into Settings window\n self.e1.configure(width=len(str(self.master.size[0]))) # set width of the entry widgets\n self.e2.configure(width=len(str(self.master.size[1])))\n self.entry1.set(self.master.size[0]) # set frame size into the entry widgets\n self.entry2.set(self.master.size[1])\n self.listbox.insert('end', *self.master.list) # fill ListBox with data\n #\n box1 = ttk.Frame(top) # top right frame container with buttons\n box1.grid(row=1, column=3, sticky='n', rowspan=2)\n self.button_add = ttk.Button(box1, width=w, state='disabled',\n text='Add', command=self.add)\n self.button_add.grid(row=0, column=0, padx=5)\n self.button_remove = ttk.Button(box1, width=w, state='disabled',\n text='Remove', command=self.remove)\n self.button_remove.grid(row=1, column=0, padx=5, pady=5)\n #\n box2 = ttk.Frame(top) # bottom right frame container with buttons\n box2.grid(row=2, column=3, sticky='s', rowspan=2)\n self.button_up = ttk.Button(box2, width=w, state='disabled',\n text='Up', command=self.up)\n self.button_up.grid(row=0, column=0, padx=5)\n self.button_down = ttk.Button(box2, width=w, state='disabled',\n text='Down', command=self.down)\n self.button_down.grid(row=1, column=0, padx=5, pady=5)\n #\n box3 = ttk.Frame(self) # bottom frame with buttons: ok, cancel and apply\n box3.grid(row=1, column=0, sticky='e')\n ttk.Button(box3, width=w, text='Ok',\n command=self.ok).pack(side='left', padx=5, pady=5)\n ttk.Button(box3, width=w, text='Cancel',\n command=self.cancel).pack(side='left', pady=5)\n self.button_apply = ttk.Button(box3, width=w, state='disabled',\n text='Apply', command=self.apply)\n self.button_apply.pack(side='left', padx=5, pady=5)\n #\n self.update_idletasks() # wait until window is created\n self.minsize(self.winfo_width(), self.winfo_height()) # set minimal size\n\n def validate_change(self):\n \"\"\" Validate if changes are made and enable/disable Apply button \"\"\"\n if self.entry1.get() == self.master.size[0] and \\\n self.entry2.get() == self.master.size[1] and \\\n self.listbox.get(0, 'end') == tuple(self.master.list):\n self.button_apply.configure(state='disabled')\n return False\n else:\n self.button_apply.configure(state='normal')\n return True\n\n def validate_size(self, d, i, P, s, S, v, V, W):\n \"\"\" Validate only digits for the size in pixels \"\"\"\n # Validation parameters\n # %d = Type of action (1=insert, 0=delete, -1 for others)\n # %i = index of char string to be inserted/deleted, or -1\n # %P = value of the entry if the edit is allowed\n # %s = value of entry prior to editing\n # %S = the text string being inserted or deleted, if any\n # %v = the type of validation that is currently set\n # %V = the type of validation that triggered the callback\n # (key, focusin, focusout, forced)\n # %W = the tk name of the widget\n if P.isdigit() or P == '':\n # Change width of the entry widget according to the new length\n self.master.nametowidget(W).configure(width=len(str(P))) # fit the entry width\n self.after_idle(self.validate_change) # enable/disable Apply button after some time\n return True\n self.bell()\n return False\n\n def validate_classname(self, d, i, P, s, S, v, V, W):\n \"\"\" Validate string for class name \"\"\"\n for j in S: # Could enter alphanumeric or some other symbols\n if j.isalpha() or j.isdigit() or j in '., -_@':\n classname = P.strip() # strip opening and trailing spaces\n if classname and classname not in self.listbox.get(0, 'end'):\n self.button_add.configure(state='normal') # enable Add button\n else:\n self.button_add.configure(state='disabled') # disable Add button\n return True\n self.bell()\n return False\n\n def on_select(self, event=None):\n \"\"\" Enable/disable buttons on selection of the ListBox item \"\"\"\n if self.listbox.curselection(): # if selection\n self.button_remove.configure(state='normal')\n self.button_up.configure(state='normal')\n self.button_down.configure(state='normal')\n else:\n self.button_remove.configure(state='disabled')\n self.button_up.configure(state='disabled')\n self.button_down.configure(state='disabled')\n\n def on_focus(self, event=None):\n \"\"\" ListBox obtains focus \"\"\"\n if self.listbox.curselection(): return # selection already exists\n if tk.ACTIVE: # active item exists\n self.listbox.selection_set('active') # select active item\n self.listbox.see('active') # make active item visible\n self.listbox.event_generate('<<ListboxSelect>>') # generate selection event\n\n def add(self, event=None):\n \"\"\" Add classname to the list \"\"\"\n classname = self.entry3.get() # get classname from the entry\n classname = classname.strip() # strip opening and trailing spaces\n if classname and classname not in self.listbox.get(0, 'end'):\n self.listbox.insert(0, classname) # add new class name to the list\n self.entry3.set('') # empty entry widget\n self.button_add.configure(state='disabled') # disable Add button\n self.validate_change() # enable/disable Apply button\n\n def remove(self, event=None):\n \"\"\" Remove classname from the list \"\"\"\n if self.listbox.curselection(): # if selected\n index = self.listbox.curselection()[0] # get index of selected item\n self.listbox.delete(index) # delete selected item\n self.button_remove.configure(state='disabled') # disable Remove button\n self.button_up.configure(state='disabled')\n self.button_down.configure(state='disabled')\n self.validate_change() # enable/disable Apply button\n\n def up(self):\n \"\"\" Move classname upwards in the ListBox \"\"\"\n if self.listbox.curselection(): # if selected\n index = self.listbox.curselection()[0] # get index of selected item\n self.listbox.see(index) # make selected item visible\n if index == 0: return # first item cannot go upwards\n classname = self.listbox.get(index) # get selected class name\n self.listbox.insert(index-1, classname) # move item upwards\n self.listbox.delete(index+1) # delete selected item\n self.listbox.activate(index-1) # activate moved item\n self.listbox.selection_set(index-1) # select moved item\n self.listbox.see(index-1) # make moved item visible\n self.validate_change() # enable/disable Apply button\n\n def down(self):\n \"\"\" Move classname downwards in the ListBox \"\"\"\n if self.listbox.curselection(): # if selected\n index = self.listbox.curselection()[0] # get index of selected item\n self.listbox.see(index) # make selected item visible\n if index == self.listbox.size() - 1: return # last item cannot go downwards\n classname = self.listbox.get(index) # get selected class name\n self.listbox.insert(index+2, classname) # move item downwards\n self.listbox.delete(index) # delete selected item\n self.listbox.activate(index+1) # activate moved item\n self.listbox.selection_set(index+1) # select moved item\n self.listbox.see(index+1) # make moved item visible\n self.validate_change() # enable/disable Apply button\n\n def apply(self):\n \"\"\" Apply settings changes \"\"\"\n if self.validate_change():\n self.master.size = (self.entry1.get(), self.entry2.get())\n self.master.list = list(self.listbox.get(0, 'end'))\n self.button_apply.configure(state='disabled')\n\n def ok(self, event=None):\n \"\"\" Apply changes and close settings window \"\"\"\n self.apply()\n self.cancel()\n\n def cancel(self, event=None):\n \"\"\" Close settings window \"\"\"\n self.master.focus_set() # put focus back to the parent window\n 
self.destroy() # destroy settings window\n\nroot = tk.Tk()\nfeedback = MainGUI(root)\nroot.mainloop()\n","repo_name":"foobar167/junkyard","sub_path":"simple_scripts/settings_window.py","file_name":"settings_window.py","file_ext":"py","file_size_in_byte":14479,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"81"} +{"seq_id":"71335762186","text":"import pytest\n\nfrom grafanarmadillo._util import project_dict\nfrom grafanarmadillo.alerter import Alerter\nfrom grafanarmadillo.find import Finder\nfrom tests.conftest import read_json_file, requires_alerting\n\n\ndef uniquify_alert(alert, unique):\n\talert.pop(\"id\", None)\n\talert[\"uid\"] = unique\n\talert[\"ruleGroup\"] = \"ruleGroup \" + unique\n\talert[\"title\"] = \"title \" + unique\n\n\treturn alert\n\n\ndef test_import(rw_shared_grafana, unique):\n\t\"\"\"Test that we can import a dashboard.\"\"\"\n\trequires_alerting(rw_shared_grafana)\n\n\tfinder, alerter = (Finder(rw_shared_grafana[1]), Alerter(rw_shared_grafana[1]))\n\tfolder = finder.get_folder(\"f0\")\n\n\tnew_alert = uniquify_alert(read_json_file(\"alert_rule.json\"), unique)\n\n\talerter.import_alert(new_alert, folder)\n\n\tresult = alerter.api.alertingprovisioning.get_alertrule(unique)\n\tassert result[\"data\"] == new_alert[\"data\"]\n\tassert result[\"folderUID\"] == folder[\"uid\"]\n\n\ndef test_import__just_content(rw_shared_grafana, unique):\n\t\"\"\"Test that an alert with uid removed can be imported.\"\"\"\n\trequires_alerting(rw_shared_grafana)\n\n\tfinder, alerter = (Finder(rw_shared_grafana[1]), Alerter(rw_shared_grafana[1]))\n\tfolder = finder.get_folder(\"f0\")\n\n\tnew_alert = uniquify_alert(read_json_file(\"alert_rule.json\"), unique)\n\tdel new_alert[\"uid\"] # The important part\n\n\talerter.import_alert(new_alert, folder)\n\n\tresult = finder.get_alert(\"f0\", \"title \" + unique)\n\tassert result[\"data\"] == new_alert[\"data\"]\n\tassert result[\"folderUID\"] == folder[\"uid\"]\n\n\ndef test_import__update(rw_shared_grafana, unique):\n\t\"\"\"Test that importing for an existing dashboard overwrite.\"\"\"\n\trequires_alerting(rw_shared_grafana)\n\n\tfinder, alerter = (Finder(rw_shared_grafana[1]), Alerter(rw_shared_grafana[1]))\n\tfolder = finder.get_folder(\"f0\")\n\tnew_alert = uniquify_alert(read_json_file(\"alert_rule.json\"), unique)\n\talerter.import_alert(new_alert, folder)\n\n\tnew_alert[\"isPaused\"] = not new_alert[\"isPaused\"] # modify the alert\n\talerter.import_alert(new_alert, folder)\n\n\tresult = finder.get_alert(\"f0\", \"title \" + unique)\n\tassert result[\"isPaused\"] == new_alert[\"isPaused\"]\n\n\ndef test_importexport__roundtrip(rw_shared_grafana, unique):\n\t\"\"\"Test that we can import a dashboard and the export is the same.\"\"\"\n\tif rw_shared_grafana[0].major_version < 9:\n\t\tpytest.skip(\"Grafana does not support provisioning in version 8\")\n\n\tfinder, alerter = (Finder(rw_shared_grafana[1]), Alerter(rw_shared_grafana[1]))\n\n\tfolder_name = \"f0\"\n\ttarget_folder = finder.get_folder(folder_name)\n\n\tnew_alert = read_json_file(\"alert_rule.json\")\n\tnew_alert = uniquify_alert(new_alert, unique)\n\n\talerter.import_alert(new_alert, target_folder)\n\n\talert_search_result = finder.get_alert(folder_name, \"title \" + unique)\n\texported_alert, exported_folder = alerter.export_alert(alert_search_result)\n\n\tdef coerce_comparable(a):\n\t\tnoncomparables = {\"id\", \"provenance\", \"folderUID\", \"updated\"}\n\t\treturn project_dict(a, noncomparables, inverse=True)\n\n\tassert 
coerce_comparable(exported_alert) == coerce_comparable(new_alert)\n\n\tassert target_folder[\"uid\"] == exported_folder[\"uid\"]\n","repo_name":"lilatomic/grafanarmadillo","sub_path":"tests/test_alerter.py","file_name":"test_alerter.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"15303312562","text":"\n#!/usr/bin/env python3\n\nimport sys\nimport pysam\nimport natsort\nimport argparse\nimport numpy as np\nimport himut.util\nimport himut.gtlib\nimport himut.bamlib\nimport himut.caller\nimport multiprocessing as mp\nfrom collections import defaultdict\nfrom typing import Dict, List, Tuple\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n \"-i\",\n \"--bam\",\n type=str,\n required=True,\n help=\"BAM file to read\",\n )\n parser.add_argument(\n \"--region\",\n type=str,\n required=False,\n help=\"target chromosome\",\n )\n parser.add_argument(\n \"--region_list\",\n type=str,\n required=False,\n help=\"list of target chromosomes separated by new line\",\n )\n parser.add_argument(\n \"--min_gq\",\n type=int,\n default=20,\n required=False,\n help=\"minimum germline genotype quality (GQ) score \",\n )\n parser.add_argument(\n \"--min_mapq\",\n type=int,\n default=60,\n required=False,\n help=\"minimum mapping quality (MAPQ) score\",\n )\n parser.add_argument(\n \"--germline_snv_prior\",\n type=float,\n default=1/(10**3),\n required=False,\n help=\"germline snv prior\",\n )\n parser.add_argument(\n \"-t\",\n \"--threads\",\n type=int,\n default=1,\n required=False,\n help=\"number of threads to use\",\n )\n parser.add_argument(\n \"-o\",\n \"--out\",\n type=str,\n required=True,\n help=\"file to write\",\n )\n args = args[1:]\n return parser.parse_args(args)\n\n\ndef init_allelecounts():\n rpos2allelecounts = defaultdict(lambda: np.zeros(6))\n rpos2allele2bq_lst = defaultdict(lambda: {0: [], 1: [], 2: [], 3: []})\n return rpos2allelecounts, rpos2allele2bq_lst\n\n\ndef update_allelecounts(\n ccs,\n rpos2allelecounts: Dict[int, np.ndarray],\n rpos2allele2bq_lst: Dict[int, Dict[int, List[int]]],\n):\n\n tpos = ccs.tstart\n qpos = ccs.qstart\n for (state, ref, alt, ref_len, alt_len) in ccs.cstuple_lst:\n if state == 1: # match\n for i, alt_base in enumerate(alt):\n epos = tpos + i\n bidx = himut.util.base2idx[alt_base]\n rpos2allelecounts[epos][bidx] += 1\n rpos2allele2bq_lst[epos][bidx].append(ccs.bq_int_lst[qpos + i])\n elif state == 2: # sub\n bidx = himut.util.base2idx[alt]\n rpos2allelecounts[tpos][bidx] += 1\n rpos2allele2bq_lst[tpos][bidx].append(ccs.bq_int_lst[qpos])\n elif state == 3: # insertion\n rpos2allelecounts[tpos][4] += 1\n elif state == 4: # deletion\n for j in range(len(ref[1:])):\n rpos2allelecounts[tpos + j][5] += 1\n tpos += ref_len\n qpos += alt_len\n\n\ndef get_count_per_qname(\n chrom: str,\n chunkloci_list: List[Tuple[str, int, int]],\n bam_file: str,\n min_gq: int,\n min_mapq: int,\n germline_snv_prior: float,\n chrom2qname2qv: Dict[str, Dict[str, float]],\n chrom2qname2count: Dict[str, Dict[str, int]],\n):\n\n qname2qv = defaultdict()\n qname2count = defaultdict(lambda: 0)\n himut.gtlib.init(germline_snv_prior)\n alignments = pysam.AlignmentFile(bam_file, \"rb\")\n for (chrom, chunk_start, chunk_end) in chunkloci_list: \n chunk_tsbs_lst = []\n qname2tsbs_lst = {}\n rpos2allelecounts, rpos2allele2bq_lst= init_allelecounts() \n for i in 
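# --- Illustrative sketch (hypothetical, not part of the record above) ---
# update_allelecounts() above accumulates, per reference position, a length-6
# vector: counts for A/C/G/T plus insertions and deletions. The same bookkeeping
# on fake (position, base) observations (base2idx mirrors what himut.util.base2idx
# is assumed to look like):
import numpy as np
from collections import defaultdict

base2idx = {"A": 0, "C": 1, "G": 2, "T": 3}  # indices 4 and 5 are insertion/deletion
pos2counts = defaultdict(lambda: np.zeros(6))

for pos, base in [(100, "A"), (100, "A"), (100, "G"), (101, "T")]:
    pos2counts[pos][base2idx[base]] += 1

print(pos2counts[100])  # -> [2. 0. 1. 0. 0. 0.]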
alignments.fetch(chrom, chunk_start, chunk_end): # iterate through reads\n ccs = himut.bamlib.BAM(i)\n if not ccs.is_primary:\n continue\n update_allelecounts(ccs, rpos2allelecounts, rpos2allele2bq_lst)\n if himut.caller.is_low_mapq(ccs.mapq, min_mapq):\n continue\n ccs.cs2subindel()\n qname2count[ccs.qname] = 0\n qname2qv[ccs.qname] = ccs.get_qv()\n chunk_tsbs_lst.extend(ccs.tsbs_lst)\n qname2tsbs_lst[ccs.qname] = ccs.tsbs_lst\n \n germ_tsbs_set = set() \n for (tpos, ref, alt) in natsort.natsorted(list(set(chunk_tsbs_lst))): # iterate through substitutions\n rpos = tpos - 1\n tsbs = tpos, ref, alt\n som_gt = \"{}{}\".format(ref, alt)\n allele2bq_lst = rpos2allele2bq_lst[rpos]\n _, _, germ_gt_state, gt2gt_state = himut.gtlib.get_germ_gt(ref, allele2bq_lst)\n if germ_gt_state == \"homref\":\n continue\n germ_gq = himut.gtlib.get_germ_gq(som_gt, gt2gt_state, allele2bq_lst)\n if himut.caller.is_low_gq(germ_gq, min_gq): \n continue\n germ_tsbs_set.add(tsbs)\n \n for qname, tsbs_lst in qname2tsbs_lst.items(): \n for tsbs in tsbs_lst:\n if tsbs in germ_tsbs_set:\n continue\n qname2count[qname] += 1\n\n chrom2qname2qv[chrom] = dict(qname2qv)\n chrom2qname2count[chrom] = dict(qname2count)\n\n\ndef dump_count_per_qname(\n bam_file: str,\n region: str,\n region_list: str,\n min_gq: int,\n min_mapq: int,\n germline_snv_prior: float,\n threads: int,\n out_file: str\n): \n\n p = mp.Pool(threads)\n manager = mp.Manager()\n chrom2qname2qv = manager.dict()\n chrom2qname2count = manager.dict()\n _, tname2tsize = himut.bamlib.get_tname2tsize(bam_file)\n chrom_lst, chrom2chunkloci_lst = himut.util.load_loci(region, region_list, tname2tsize)\n get_count_per_qname_arg_lst = [\n (\n chrom,\n chrom2chunkloci_lst[chrom],\n bam_file,\n min_gq,\n min_mapq,\n germline_snv_prior,\n chrom2qname2qv,\n chrom2qname2count,\n )\n for chrom in chrom_lst\n ]\n p.starmap(\n get_count_per_qname, get_count_per_qname_arg_lst,\n )\n p.close()\n p.join()\n\n o = open(out_file, \"w\") # return\n o.write(\"{}\\t{}\\t{}\\n\".format(\"qname\", \"qv\", \"count\"))\n for chrom in chrom_lst:\n for qname, qv in chrom2qname2qv[chrom].items():\n count = chrom2qname2count[chrom][qname] \n o.write(\"{}\\t{}\\t{}\\n\".format(qname, qv, count)) \n o.close()\n\n\ndef main():\n options = parse_args(sys.argv)\n dump_count_per_qname(\n options.bam,\n options.region,\n options.region_list, \n options.min_gq,\n options.min_mapq,\n options.germline_snv_prior,\n options.threads, \n options.out\n )\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sjin09/himut","sub_path":"scripts/ccs2sbs_count.py","file_name":"ccs2sbs_count.py","file_ext":"py","file_size_in_byte":6645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29543427515","text":"from django.urls import path\r\nfrom corepages.views import *\r\n\r\nurlpatterns = [\r\n path('home/', home, name='home'),\r\n path('about/', about, name='about'),\r\n path('contact/', contact, name='contact'),\r\n path('search/', search, name='search'),\r\n path('search/', search, name='search'),\r\n path('legal/', legal, name='legal'),\r\n path('stats/', lambda req: HttpResponseRedirect('/statistics/'), name='stats'),\r\n path('statistics/', stats, name='stats'),\r\n path('brand-resources/', brand_resources, name='brand_resources'),\r\n path('', home, 
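# --- Illustrative sketch (hypothetical, not part of the record above) ---
# dump_count_per_qname() above fans work out per chromosome with
# multiprocessing.Pool.starmap and collects results through Manager dicts.
# A minimal self-contained version of that pattern:
import multiprocessing as mp

def work(key, n, shared):
    shared[key] = n * n  # each worker writes its result under its own key

if __name__ == "__main__":
    with mp.Manager() as manager:
        shared = manager.dict()
        with mp.Pool(2) as pool:
            pool.starmap(work, [("a", 2, shared), ("b", 3, shared)])
        print(dict(shared))  # -> {'a': 4, 'b': 9}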
name='home'),\r\n]","repo_name":"ewen-lbh/portfolio-v2","sub_path":"corepages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9071066766","text":"import prettytensor as pt\nimport tensorflow as tf\n\ndef softmax_mlp(input_data, output_size, layers=[32,32], activation=tf.nn.relu):\n\n net = pt.wrap(input_data).sequential()\n for l_size in layers:\n net.fully_connected(l_size, activation_fn=activation) \n output_layer, _ = net.softmax_classifier(output_size)\n\n return output_layer\n\n\ndef softmax_conv(input_data, output_size, kernels=[3,3], depths=[16,32], strides=[1,1], fc_layers=[32], activation=tf.nn.relu):\n\n net = pt.wrap(input_data).sequential()\n count = 0\n for k, d, s in zip(kernels, depths, strides):\n net.conv2d(k, d, stride=s, activation_fn=activation, name=\"conv_\" + str(count))\n count += 1\n net.flatten()\n for i, fc in enumerate(fc_layers):\n net.fully_connected(fc, activation_fn=activation, name=\"fc_\" + str(i))\n output_layer, _ = net.softmax_classifier(output_size)\n\n return output_layer\n\n\ndef softmax_lstm(input_data, output_size, timesteps, fc_layers=[32], lstm_layers=[32,32], activation=tf.nn.relu):\n\n net = pt.wrap(input_data).sequential()\n for l_size in fc_layers:\n net.fully_connected(l_size, activation_fn=activation) \n net.cleave_sequence(timesteps)\n for l_size in lstm_layers:\n net.sequence_lstm(l_size)\n net.squash_sequence()\n output_layer, _ = net.softmax_classifier(output_size)\n\n return output_layer\n\n\n\ndef dqn(input_data, output_size, kernels=[8,4,3], depths=[32,64,64], strides=[4,2,1], fc_layers=[512], activation=tf.nn.relu):\n \n net = pt.wrap(input_data).sequential()\n for k, d, s in zip(kernels, depths, strides):\n net.conv2d(k, d, stride=s, activation_fn=activation)\n net.flatten()\n for fc in fc_layers:\n net.fully_connected(fc, activation_fn=activation)\n output_layer = net.fully_connected(output_size)\n return output_layer\n\n","repo_name":"sisl/rltools","sub_path":"rltools/models/tf/simple_models.py","file_name":"simple_models.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"37004570224","text":"#Write a program that uses the valorPagamento function to determine the amount to be paid for an installment of a bill. The program should ask the user for the installment amount and the number of days overdue, and pass these values to the valorPagamento function, which computes the amount due and returns it to the calling program. The program should then display the amount due on screen. After that, the program should ask for another installment amount and keep going until an installment equal to zero is entered. At that point the program ends, printing the day's report: the number and the total value of installments paid during the day. The amount due is computed as follows. For on-time payments, charge the installment amount as-is. 
When overdue, charge a 3% fine plus 0.1% interest per day overdue.#\n\n\ndef controladorPrestacao():\n contador = 0\n valorTotal = 0\n valorPrestacao = 0\n resposta = 'Y'\n while resposta == 'Y':\n valorPrestacao = entradaDados()\n contador += 1\n valorTotal = valorPrestacao + valorTotal\n resposta = coletarResposta()\n impressaoFinal(valorTotal, contador)\n \ndef entradaDados():\n valorPrestacao = float(input('\nEnter the installment amount: '))\n diasAtraso = int(input('\nEnter the number of days overdue: ')) \n valorPrestacao = calcularPrestacao(valorPrestacao, diasAtraso)\n return valorPrestacao\n \ndef calcularPrestacao(valorPrestacao, diasAtraso):\n if diasAtraso == 0:\n valorPrestacao = exibirPrestacaoEmDia(valorPrestacao)\n return valorPrestacao\n else:\n valorPrestacaoEmAtraso = exibirPrestacaoEmAtraso(valorPrestacao, diasAtraso)\n return valorPrestacaoEmAtraso\n \n\ndef exibirPrestacaoEmDia(valorPrestacao):\n print(f'\nThe installment amount is R$ {valorPrestacao}')\n return valorPrestacao\n\ndef exibirPrestacaoEmAtraso(valorPrestacao, diasAtraso):\n multa = valorPrestacao * 0.03\n juros = valorPrestacao * 0.001 * diasAtraso\n valorPrestacaoComJuros = valorPrestacao + multa + juros\n print(f'\nThe installment amount is R$ {valorPrestacaoComJuros}')\n return valorPrestacaoComJuros \n \ndef coletarResposta():\n resposta = input('\nContinue? (Y/N) ').upper()\n return resposta\n\ndef impressaoFinal(valorTotal, contador):\n print(f'\nThe total value of installments is R$ {valorTotal}')\n print(f'\nThe number of installments paid was {contador}')\n\n \n#Main program\ncontroladorPrestacao()\n","repo_name":"benhur1920/Exercicio07FuncoesPython","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8994407145","text":"\"\"\" \n Read Gimp .ggr gradient files.\n Ned Batchelder, http://nedbatchelder.com\n This code is in the public domain.\n\"\"\"\n\n__version__ = '1.0.20070915'\n\nimport colorsys, math\n\n\nclass GimpGradient:\n \"\"\" \n Read and interpret a Gimp .ggr gradient file.\n \"\"\"\n def __init__(self, f=None):\n if f:\n self.read(f)\n \n class _segment:\n pass\n \n def read(self, f):\n \"\"\" \n Read a .ggr file from f (either an open file or a file path).\n \"\"\"\n if isinstance(f, str): # Python 3: originally isinstance(f, basestring)\n f = open(f) # Python 3: originally the Python 2 builtin file(f)\n if f.readline().strip() != \"GIMP Gradient\":\n raise Exception(\"Not a GIMP gradient file\")\n line = f.readline().strip()\n if not line.startswith(\"Name: \"):\n raise Exception(\"Not a GIMP gradient file\")\n self.name = line.split(\": \", 1)[1]\n nsegs = int(f.readline().strip())\n self.segs = []\n for i in range(nsegs):\n line = f.readline().strip()\n seg = self._segment()\n (seg.l, seg.m, seg.r,\n seg.rl, seg.gl, seg.bl, _,\n seg.rr, seg.gr, seg.br, _,\n seg.fn, seg.space) = map(float, line.split())\n self.segs.append(seg)\n \n def color(self, x):\n \"\"\" \n Get the color for the point x in the range [0..1].\n The color is returned as an rgb triple, with all values in the range [0..1].\n \"\"\"\n # Find the segment.\n for seg in self.segs:\n if seg.l <= x <= seg.r:\n break\n else:\n # No segment applies! 
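# --- Illustrative sketch (hypothetical, not part of the record above) ---
# The late-payment rule in the exercise above: on-time pays the installment
# as-is; overdue pays a 3% fine plus 0.1% interest per day late. Worked example:
def amount_due(installment, days_late):
    if days_late == 0:
        return installment
    return installment + installment * 0.03 + installment * 0.001 * days_late

assert amount_due(1000.0, 0) == 1000.0
assert amount_due(1000.0, 10) == 1000.0 + 30.0 + 10.0  # 3% fine + 10 days of 0.1% = 1040.0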
Return black I guess.\n return (0,0,0)\n\n # Normalize the segment geometry.\n mid = (seg.m - seg.l)/(seg.r - seg.l)\n pos = (x - seg.l)/(seg.r - seg.l)\n \n # Assume linear (most common, and needed by most others).\n if pos <= mid:\n f = pos/mid/2\n else:\n f = (pos - mid)/(1 - mid)/2 + 0.5\n\n # Find the correct interpolation factor.\n if seg.fn == 1: # Curved\n f = math.pow(pos, math.log(0.5) / math.log(mid))\n elif seg.fn == 2: # Sinusoidal\n f = (math.sin((-math.pi/2) + math.pi*f) + 1)/2\n elif seg.fn == 3: # Spherical increasing\n f -= 1\n f = math.sqrt(1 - f*f)\n elif seg.fn == 4: # Spherical decreasing\n f = 1 - math.sqrt(1 - f*f)\n\n # Interpolate the colors\n if seg.space == 0:\n c = (\n seg.rl + (seg.rr-seg.rl) * f,\n seg.gl + (seg.gr-seg.gl) * f,\n seg.bl + (seg.br-seg.bl) * f\n )\n elif seg.space in (1,2):\n hl, sl, vl = colorsys.rgb_to_hsv(seg.rl, seg.gl, seg.bl)\n hr, sr, vr = colorsys.rgb_to_hsv(seg.rr, seg.gr, seg.br)\n\n if seg.space == 1 and hr < hl:\n hr += 1\n elif seg.space == 2 and hr > hl:\n hr -= 1\n\n c = colorsys.hsv_to_rgb(\n (hl + (hr-hl) * f) % 1.0,\n sl + (sr-sl) * f,\n vl + (vr-vl) * f\n )\n return c\n ","repo_name":"remig/lic","sub_path":"src/GimpParser.py","file_name":"GimpParser.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"31374698732","text":"# 1 is a path, 0 is a monster\n# uses BFS\n# escape the maze while avoiding monsters (the exit is the last cell of the grid)\n# find the shortest path\nimport sys\nfrom collections import deque\n\ndef Maze(x,y):\n # up, down, left, right\n dx = [-1,1,0,0]\n dy = [0,0,-1,1]\n\n queue = deque()\n queue.append((x,y))\n\n while queue:\n x, y = queue.popleft()\n for i in range(0,4):\n nx = x+dx[i]\n ny = y+dy[i]\n # out of bounds\n if nx <= -1 or nx >= n or ny <= -1 or ny >=m:\n continue\n # ran into a monster\n if graph[nx][ny] == 0:\n continue\n # update only cells that have not been visited yet\n if graph[nx][ny] == 1:\n queue.append((nx,ny))\n graph[nx][ny] = graph[x][y]+1\n \n return graph[n-1][m-1]\n\n\nn, m = map(int, sys.stdin.readline().strip().split())\n\ngraph = []\nfor i in range(0,n):\n graph.append(list(map(int, sys.stdin.readline().strip())))\n\n\nresult = Maze(0,0)\nprint(result)\n\n\"\"\"\n5 6\n101010\n111111\n000001\n111111\n111111\n\"\"\"\n\n\"\"\"\nStarting from (1,1), use BFS to explore toward the exit, updating distances as you go.\nFor every unvisited node, explore up/down/left/right from it and update the distance.\n\"\"\"\n","repo_name":"Jeon-Jae-woo/CodingTest","sub_path":"codingTestBook/DFS/BFS/미로탈출.py","file_name":"미로탈출.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18410241125","text":"\n\nimport random\nrock = ''' \n _______\n---' ____) \n (_____) \n (_____) \n (____)\n---.__(___) \n'''\n\npaper = ''' \n _______\n---' ____)____ \n ______) \n _______) \n _______)\n---.__________) \n'''\n\nscissors = ''' \n _______\n---' ____)____ \n ______) \n __________) \n (____)\n---.__(___) \n''' \ngame_img = [rock, paper, scissors]\n\nuser = int(input(\"What do you choose? Type 0 for Rock, 1 for Paper or 2 for scissors. 
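# --- Illustrative sketch (hypothetical, not part of the records above) ---
# GimpGradient.color() first maps x into [0..1] within its segment, then warps
# that position around the segment midpoint before blending the endpoint colors.
# The linear warp in isolation:
def linear_factor(l, m, r, x):
    mid = (m - l) / (r - l)   # normalized midpoint
    pos = (x - l) / (r - l)   # normalized position of x inside the segment
    if pos <= mid:
        return pos / mid / 2
    return (pos - mid) / (1 - mid) / 2 + 0.5

assert linear_factor(0.0, 0.5, 1.0, 0.5) == 0.5    # centered midpoint maps to 0.5
assert linear_factor(0.0, 0.25, 1.0, 0.25) == 0.5  # a skewed midpoint still maps to 0.5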
\\n\"))\nif user >= 3 or user < 0:\n print(\"You type an invalid number\")\nelse:\n print(game_img[user])\n\n computer = random.randint(0,2)\n print(\"Computer chose:\")\n print(game_img[computer])\n\n if user == computer :\n print(\"it's a draw\")\n elif ( user == 0 and computer == 1 ) or ( user == 1 and computer == 2 ) or ( user == 2 and computer == 0 ):\n print(\"You lose\")\n elif ( user == 0 and computer == 2 ) or ( user == 2 and computer == 1 ) or ( user == 1 and computer == 0 ):\n print(\"You win\")\n \n\nprint(\"This repl has exited, |run again|\")\n","repo_name":"PritamKhan/python_projects","sub_path":"Rock Paper Scissors.py","file_name":"Rock Paper Scissors.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73286400266","text":"import os\n\nfrom code import BaseError\nfrom shop_enum import CreateType, ZipType, CreateAuthority\nfrom backend_extensions import check_is_admin, check_if_member\n\n\ndef check_create_permissions(username, create_type):\n is_member = check_if_member(username)\n if not is_member:\n raise BaseError(\"1005\")\n if create_type != CreateType.ZIP.value:\n raise BaseError(\"1005\")\n\n return True\n\n\ndef check_if_creatable(parent_id):\n if not isinstance(parent_id, int):\n parent_id = int(parent_id)\n\n print(\"Pid\", parent_id)\n\n if parent_id == ZipType.ACTOR.value:\n is_creatable = CreateAuthority.ACTOR.value\n\n elif parent_id == ZipType.SCENE.value:\n is_creatable = CreateAuthority.SCENE.value\n\n elif parent_id == ZipType.ACTION.value:\n is_creatable = CreateAuthority.ACTION.value\n\n elif parent_id == ZipType.SHOT.value:\n is_creatable = CreateAuthority.SHOT.value\n\n else:\n raise BaseError(\"1002\")\n\n if not is_creatable:\n raise BaseError(\"1005\")\n\n\ndef check_access_log(access_log):\n\n data = list()\n\n with open(access_log, \"r\") as f:\n while True:\n line = f.readline()\n if not line:\n break\n\n line_tmp = line.strip(\"\\n\").split(\" \")\n ip_addr = line_tmp[-1]\n access_times = line_tmp[-2]\n data.append([ip_addr, access_times])\n\n return data\n\n","repo_name":"Topaz1618/MeowShop","sub_path":"backend_utils.py","file_name":"backend_utils.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23166298406","text":"import cv2 as cv\nfrom PIL import Image\nimport os\nfrom numpy import where, array\nimport shutil\n\n#Target dimensions of result image in centimeters\nTARGET_HEIGHT_CM = 15.2\nTARGET_WIDTH_CM = 10.2\n\n#Target DPI of photo\nDPI = 300\n\n#Target ratio of the face size to the height of the image in percentage\nTARGET_OVERHEAD_PERCENT = 0.38\nTARGET_UNDERHEAD_PERCENT = 0.92\n\nhaar_casc = cv.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ndef MaxFace(face_scaled, typ):\n\n #Use canny to get black and white edges of body/face\n canny = cv.Canny(face_scaled, 120,230)\n\n #Using numpy to select all white pixels (edges)\n indices = where(canny != [0])\n typ2 = typ\n\n if typ==2:\n typ2=1\n indices1 = list(indices[typ2])\n\n #Get index of white pixel located furthest to the top\n if typ==0:\n indx = indices1.index(min(indices1))\n\n #Get index of white pixel located furthest to the right \n elif typ==1:\n indx = indices1.index(max(indices1))\n\n #Get index of white pixel located furthest to the left\n elif typ==2:\n typ = 1\n indx = indices1.index(min(indices1))\n\n return indices[typ][indx]\n\ndef GetFace(face_source, 
grayscale2, i2, scale2):\n\n #Function used to detect face if it wasn't found at first attempt\n #or remove all extra smallest areas (potential faces) and leave the biggest one\n\n if len(face_source)==0:\n\n #If face wasn't found, resize image by 0.75\n grayscale2 = cv.resize(grayscale2, (0,0), fx=0.75, fy=0.75, interpolation=cv.INTER_AREA)\n face_source = haar_casc.detectMultiScale(grayscale2, 1.1, i2, minSize=(int(0.12*grayscale2.shape[0]), int(0.12*grayscale2.shape[0])))\n scale2 *= 0.75\n\n while len(face_source)==0 and i2>4:\n #If face was't found try to decrement minimun neighbours paremeter\n i2 -= 1\n face_source = haar_casc.detectMultiScale(grayscale2, 1.1, i2, minSize=(int(0.15*grayscale2.shape[0]),int(0.15*grayscale2.shape[0])))\n \n #Find biggest area and return it's coordinates with new scale\n if len(face_source)>1:\n dimensions=[]\n \n for (x,y,w,h) in face_source:\n dimensions.append(w*h)\n\n indx = dimensions.index(max(dimensions))\n face_source = array([face_source[indx]])\n return scale2, face_source\n\ndef CropImage(reject_path, save_path, directory):\n\n #Replace DPI to Dots Per Centimeter \n DPCM = DPI / 2.54\n \n #Calculate target height and width of final picture in pixels\n target_height_px = round(TARGET_HEIGHT_CM * DPCM,0)\n target_width_px = round(TARGET_WIDTH_CM * DPCM,0)\n\n #Calculate target ratio that final image should have\n target_ratio = round(target_height_px / target_width_px,8)\n\n for photo in os.listdir(directory):\n if photo.endswith(\".JPG\") or photo.endswith(\".jpg\"):\n \n #Read every picture with .jpg extenstion from directory\n #If picture couldn't be read, copy it to rejected photos directory\n try:\n img = cv.imread(directory+'/'+photo)\n height, width = img.shape[:2]\n\n except:\n shutil.copy2(os.path.join(directory,photo), os.path.join(reject_path,photo))\n yield 1\n continue\n\n #Scale original picture (200px width) to detect the face faster and convert to grayscale\n scale = round((200/width),6)\n img_scaled = cv.resize(img, (0,0), fx=scale, fy=scale, interpolation=cv.INTER_AREA)\n grayscale = cv.cvtColor(img_scaled, cv.COLOR_BGR2GRAY)\n\n #Detect face using Haar Cascade and pass it to function GetFace\n #variable face contains array of found conrdinates (x, y, width, height)\n face = haar_casc.detectMultiScale(grayscale, 1.1, 5)\n scale2 ,face = GetFace(face, grayscale, 5,scale)\n\n #Assign new scale value and resize original image if scale value was changed\n if scale2!=scale:\n scale = scale2\n img_scaled = cv.resize(img, (0,0), fx=scale, fy=scale, interpolation=cv.INTER_AREA)\n\n #Copy photo to rejected directory if the face was still not detected\n if len(face)==0 or (face[0][2]/(height*scale))<0.18:\n shutil.copy2(os.path.join(directory,photo), os.path.join(reject_path,photo))\n yield 1\n continue\n \n #Get position of face and its size (width == height)\n face_x = face[0][0]\n face_y = face[0][1]\n face_size = face[0][2]\n\n #Remove extra free space (on the left and right) to meet the ratio \n #ALso removes all unnecessary gradients or shadows\n side = round(((img_scaled.shape[0]/target_ratio) - face_size)/2, 0)\n tmp_c = face_x - side\n tmp_d = face_x + side + face_size\n\n if tmp_c<0:\n tmp_c = 0\n \n if tmp_d>img_scaled.shape[1]:\n tmp_d = img_scaled.shape[1]\n\n #Create two separate fragments of scaled image\n #Get face with above free space and 5% extra on both sides\n img_over = img_scaled[0 : face_y+face_size, int(face_x*0.95) : int((face_x+face_size)*1.05)]\n\n #Get 65% of face and whole width of the image\n img_side = 
img_scaled[face_y : face_y+int(face_size*0.65), int(tmp_c) : int(tmp_d)]\n \n #Get x value of face (edge of the face - furthest to the left) \n max_face_x = round(MaxFace(img_side, 1)/scale,0) + round(tmp_c/scale,0)\n\n #Get x value of face (edge of the face - furthest to the right) \n min_face_x = round(MaxFace(img_side,2)/scale,0) + round(tmp_c/scale,0)\n\n #Get y value of face (top of the head) \n max_face_y = round(MaxFace(img_over, 0)/scale,0)\n\n #Divide each scaled value to fit original picture\n face_x = round(face_x/scale,0)\n face_y = round(face_y/scale,0)\n face_size = round(face_size/scale,0)\n \n #Amount of pixels that should be left above the head\n overhead_px = round((TARGET_OVERHEAD_PERCENT*face_size) + ((face_y - max_face_y)/2),0)\n\n #Amount of pixels that should be left at the bottom of the head\n underhead_px = round(TARGET_UNDERHEAD_PERCENT*face_size,0)\n\n #a - amount of pixels which should be cut from the top (of the image)\n #b - amout of pixels which should be cut from the bottom\n #c - amout of pixels which should be cut from the left side\n #d - amout of pixels which should be cut from the right side\n a = face_y - overhead_px\n b = face_y + face_size + underhead_px\n c = 0\n d = width\n\n if a<0:\n a = 0\n \n if b>height:\n b = height\n\n #Get percentage of the space above the head in relation to height of the image\n overhead_ratio = (max_face_y - a)/(b - a) \n\n #Increase space if it's too low\n if overhead_ratio<0.065:\n a = max_face_y-round(0.068*(b - a))\n if a<0:\n a = 0\n\n #Decrease space if it's too big\n if overhead_ratio>0.075:\n a = max_face_y-round(0.075*(b - a))\n\n if a<0:\n a = 0\n\n #Get percentage of the space under the head in relation to height of the image\n underhead_ratio = (b - ((face_y + face_size)))/(b - a)\n\n if underhead_ratio>0.33:\n b=(face_y + face_size) + round(0.33*(b - a))\n\n if b>height:\n b = height\n\n face_width = max_face_x - min_face_x\n\n #Remove extra space on the left and right \n side = round(((b - a) - (target_ratio*face_width))/target_ratio,0)\n\n if side%2!=0:\n side = int(side/2)\n c = min_face_x - (side + 1)\n d = max_face_x + side\n\n else:\n c = min_face_x - (side/2)\n d = max_face_x + (side/2)\n\n #Increase c if it's to small\n if c>face_x:\n c = int(0.95*face_x)\n \n if c>min_face_x:\n c = int(0.95*min_face_x)\n\n #Check values\n if c<0:\n c = 0\n \n if d>width:\n d = width\n\n #Get width of left side of the image\n left_side = face_x - c\n\n #Get width of right side of the image\n right_side = d - (face_x + face_size)\n\n #Center the face if positioned too much to the right\n #Face can't be right in the middle of the image\n #Often head in the picture is turned to the left or right\n\n if left_sideheight:\n #If adding the diffrence creates incorrect value then adjust the width\n x += round(((b + hdiff) - height)/target_ratio, 0)\n b = height\n \n else:\n b += hdiff\n\n c += int(x/2)\n d -= (int(x/2) + 1)\n\n else:\n #Get amount of pixels (height) needed to match target ratio\n hdiff = round((d - c)*target_ratio, 0) - (b - a)\n\n #If adding the diffrence creates incorrect value then adjust the width\n if (b+hdiff)>height:\n y = round(((b + hdiff) - height)/target_ratio, 0)\n c += int(y/2)\n d -= (int(y/2) + 1)\n b = height\n \n else:\n b += hdiff\n \n #Current height to width ratio is too big\n #Decrease height or increase width depending on size of the face in final picture\n elif current_ratio>target_ratio:\n \n if face_ratio>0.86:\n #Increase width if face size is too big\n #Face width should be from 66% 
to 86% of the whole width\n\n #Get difference of width needed to be added\n wdiff = round(((b - a)/target_ratio) - (d - c), 0)\n\n if face_width/((d - c) + wdiff)>0.86:\n\n if c - (int(wdiff/2)+1)<0:\n wdiff = c*2\n\n if (d + int(c/2) + 1)>width:\n wdiff = ((d + int(wdiff/2)) - width)*2\n \n elif (d + int(wdiff/2) + 1)>width:\n wdiff = ((d + int(wdiff/2)) - width)*2\n \n if (c - int(wdiff/2))<0:\n wdiff = c*2\n\n c -= int(wdiff/2)\n d += int(wdiff/2)\n \n else:\n x = round(face_width/0.86) - (d - c)\n\n if c-int(x/2)<0:\n x = c*2\n\n if d+int(c/2)>width:\n x = ((d + int(x/2)) - width)*2\n \n elif d+int(x/2)>width:\n x = ((d + int(x/2)) - width)*2\n\n if c-int(x/2)<0:\n x = c*2\n \n c -= int(x/2)\n d += int(x/2)\n\n #Adjust only height if face ratio is ok \n hdiff = abs((b - a) - round((d - c)*target_ratio, 0))\n b -= hdiff\n\n #Final adjustments to face ratio\n if (face_width/(d-c))<0.66:\n x = (d - c) - round(face_width/0.66,0)\n y = round(x*target_ratio, 0)\n\n if ((b - y) - (face_y + face_size))/((b - a) - y)<0.31:\n y = abs(round((((face_y + face_size) + (0.31*(b - a))) - b)/1.31,0))\n x = round(y/target_ratio,0)\n \n b -= y\n\n if x%2==0:\n c += x/2\n d -= x/2\n\n else:\n c += int(x/2)\n d -= (int(x/2) + 1)\n \n #Final adjustments to meet final ratio\n #Usually it's few pixels. <1% of whole height\n hdiff = round((d - c)*target_ratio, 0) - (b - a)\n\n if (b+hdiff)>height:\n wdiff = (b + hdiff) - height\n wdiff = round(wdiff*target_ratio, 0)\n\n c += wdiff\n b = height\n\n else:\n b += hdiff\n\n try:\n #Cut the image\n img = img[int(a):int(b), int(c):int(d)]\n\n #Convert from BGR to RGB \n img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n \n #Resize image to meet targets dimensions\n img = cv.resize(img, (0,0), fx=target_width_px/(d-c), fy=target_height_px/(b-a), interpolation=cv.INTER_AREA)\n\n #Convert image from OpenCV to PIL and save\n img = Image.fromarray(img)\n img.save(save_path+\"/\"+photo, dpi=(DPI,DPI), quality=100, subsampling=0)\n\n except:\n #Copy photo to reject directory\n shutil.copy2(os.path.join(directory,photo), os.path.join(reject_path,photo))\n yield 1","repo_name":"rumcisse/image-cropping","sub_path":"imageprocess.py","file_name":"imageprocess.py","file_ext":"py","file_size_in_byte":15703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20920245078","text":"\"\"\"\n==================\n=== Deprecated ===\n==================\n\nAs of 2017-11-09, this file is no longer used. The ArcheFilter forecasting\nsystem was only used for the 2015--2016 flu season.\n\n\n===============\n=== Purpose ===\n===============\n\nAssimilates digital surveillance signals and a flu model to produce nowcasts\n(and, secondarily, forecasts) of flu.\n\n\n=================\n=== Changelog ===\n=================\n\n2016-12-08\n + use secrets\n2015-12-30\n + enforce minimum number of bins when sampling\n * quick hack for 2015w50: min_shift from -10 to 0\n2015-12-17\n * penalizing HHS6 curve height by 15%\n2015-12-14\n + AF_Utils.signal* (replace `data_io` version)\n * replace `data_io` with Epidata API call to `signals`\n * prefixed output with [AF]\n - don't penalize tall curves\n - AF_Utils.check (duplicate of AF_Utils._get)\n2015-12-07\n + penalize ridiculously tall curves (i.e. 
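# --- Illustrative sketch (hypothetical, not part of the records above) ---
# MaxFace() in the cropping record above finds the extreme edge pixel (topmost,
# leftmost or rightmost) by running Canny and scanning the nonzero coordinates.
# The coordinate trick in isolation, on a synthetic edge mask instead of a
# Canny output:
import numpy as np

mask = np.zeros((5, 5), dtype=np.uint8)
mask[1, 2] = 255  # an "edge" pixel
mask[3, 4] = 255  # another one

ys, xs = np.where(mask != 0)
print(ys.min())  # topmost edge row   -> 1
print(xs.max())  # rightmost edge col -> 4
print(xs.min())  # leftmost edge col  -> 2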
hhs6 on 2015w45)\n2015-11-09\n * near total rewrite of process model and filtering\n2015-10-26\n + first version\n\"\"\"\n\n# built-in\n# external\nfrom filterpy.kalman import MerweScaledSigmaPoints as SigmaPoints\nfrom filterpy.kalman import UnscentedKalmanFilter as UKF\nimport numpy as np\nimport scipy.stats as stats\n# local\nfrom archetype import Archetype\nfrom delphi_epidata import Epidata\nimport epiweek as flu\nfrom fc_abstract import Forecaster\nfrom neldermead import NelderMead\nimport secrets\n\n\nclass FluProcess:\n \"\"\" the model, based on the Archetype idea \"\"\"\n\n def __init__(self, archetype):\n self.archetype = archetype\n self.target_mean = {}\n self.target_var = {}\n self.target_std = {}\n\n def score(self, region, curve):\n # half of summed squared normalized error (from multivariate normal PDF)\n if region == 'hhs6':\n curve = curve * 1.15\n z_scores = self.weights * (curve - self.target_mean[region]) / self.target_std[region]\n return np.dot(z_scores, z_scores) / 2\n\n def scan_grid(self, region, min_shift, max_shift, n_shift, min_scale, max_scale, n_scale):\n # calculate parameter bins\n shifts = np.linspace(min_shift, max_shift, n_shift)\n scales = np.linspace(min_scale, max_scale, n_scale)\n d_shift, d_scale = shifts[1] - shifts[0], scales[1] - scales[0]\n bins = [[(t, s) for s in scales] for t in shifts]\n samples = []\n # get score of curve in center of each bin\n grid = np.zeros((n_shift, n_scale))\n for (t, shift) in enumerate(shifts):\n for (s, scale) in enumerate(scales):\n grid[t][s] = self.score(region, self.archetype[region].instance(scale, shift, False))\n # convert scores to PMF\n grid = np.exp(-grid)\n grid /= np.sum(grid)\n # find best bin index\n best = np.unravel_index(np.argmax(grid), grid.shape)\n return grid, bins, best, d_shift, d_scale\n\n def get_best_fit(self, region, output=None):\n # coarse sweep over global parameter space\n grid, bins, best, d_shift, d_scale = self.scan_grid(region, 0, +10, 32, 1 / 3, 3, 32)\n guess = bins[best[0]][best[1]]\n # initialize derivate-free optimizer to find best parameters\n def objective(params):\n return self.score(region, self.archetype[region].instance(params[1], params[0], False))\n solver = NelderMead(objective, limit_iterations=100, silent=True)\n simplex = solver.get_simplex(len(guess), guess, min(d_shift, d_scale))\n # do the optimization\n shift, scale = solver.run(simplex)._location\n if output is not None:\n output[0] = shift\n output[1] = scale\n # return the best-fit curve\n return self.archetype[region].instance(scale, shift, False)\n\n def get_sample_fits(self, region, num_samples, add_holiday):\n ## find the best part of parameter space\n #loc = [0, 0]\n #self.get_best_fit(region, loc)\n # fine sweep over local parameter space\n #nw, ns = 2, 1.1\n #t1, t2 = loc[0] - nw, loc[0] + nw\n #s1, s2 = loc[1] / ns, loc[1] * ns\n t1, t2 = 0, +10\n s1, s2 = 1 / 3, 3\n grid, bins, best, d_shift, d_scale = self.scan_grid(region, t1, t2, 128, s1, s2, 128)\n # sort by decreasing bin likelihood\n data = []\n for (t, row) in enumerate(bins):\n for (s, (shift, scale)) in enumerate(row):\n data.append((grid[t][s], shift, scale))\n data = np.array(sorted(data, key=lambda d: -d[0]))\n # limit to the bins containing 99% of the probability\n limit = max(1, np.searchsorted(np.cumsum(data[:, 0]), 0.99))\n probs, shifts, scales = data[:limit, 0], data[:limit, 1], data[:limit, 2]\n cprob = np.cumsum(probs / sum(probs))\n # get sample curves\n curves = []\n for i in range(num_samples):\n # randomly select a weighted 
bin\n index = np.searchsorted(cprob, np.random.random())\n # randomly select a point within the bin\n try:\n shift = shifts[index] + np.random.uniform(-d_shift, +d_shift) / 2\n scale = scales[index] + np.random.uniform(-d_scale, +d_scale) / 2\n except Exception as ex:\n print('shift/scale index out of bounds!')\n print(len(shifts), shift, d_shift)\n print(len(scales), scale, d_scale)\n raise ex\n # build the archetype curve with the selected parameters\n curves.append(self.archetype[region].instance(scale, shift, add_holiday))\n return curves, grid\n\n def inform(self, region, mean, var):\n # combine observations and archetype\n self.week = len(mean)\n m1 = mean\n v1 = var\n m2 = self.archetype[region].unaligned_unsmoothed_mean[self.week:]\n v2 = self.archetype[region].unaligned_unsmoothed_var[self.week:]\n self.target_mean[region] = np.hstack((m1, m2))\n #self.target_var = np.ones(len(self.target_mean)) #np.hstack((v1, v2))\n self.target_var[region] = np.hstack((v1, v2))\n self.target_std[region] = self.target_var[region] ** 0.5\n # build weight vector\n self.weights = np.ones(len(self.target_mean[region])) * 0.2\n self.weights[max(0, self.week - 5):self.week] = 1\n\n def forecast(self, state):\n output = []\n for (x, region) in zip(state, AF_Utils.regions):\n self.target_mean[region][self.week - 1] = x\n # TODO: variance here?\n self.target_var[region][self.week - 1] = 1e-3\n curve = self.get_best_fit(region)\n output.append(curve[self.week])\n return np.array(output)\n\n def measure(self, state):\n # twitter (11)\n # wiki (1)\n # uili (11)\n twitter = []\n wiki = []\n uili = []\n for (x, region) in zip(state, AF_Utils.regions):\n ili_nh = x\n ili_h = self.archetype[region].add_holiday_week(ili_nh, self.week)\n twitter.append(ili_nh)\n uili.append(ili_h)\n nat_nh = [AF_Utils.get_national(twitter)]\n nat_h = [AF_Utils.get_national(uili)]\n twitter = nat_nh + twitter\n wiki = nat_nh\n uili = nat_h + uili\n return np.array(twitter + wiki + uili)\n\n\nclass AF_Utils:\n \"\"\" helper for loading (and generating) data \"\"\"\n\n regions = ['hhs%d' % i for i in range(1, 11)]\n\n @staticmethod\n def _get(res):\n if res['result'] != 1:\n raise Exception('API result=%d (%s)' % (res['result'], res['message']))\n return res['epidata']\n\n @staticmethod\n def get_season(season, location):\n #end = (season + 1) * 100 + 29\n #epiweeks = Epidata.range(flu.add_epiweeks(end, -51), end)\n begin = season * 100 + 30\n epiweeks = Epidata.range(begin, flu.add_epiweeks(begin, 51))\n rows = AF_Utils._get(Epidata.ilinet(location, epiweeks))\n return [row['wili'] for row in rows]\n\n @staticmethod\n def initialize_filter(x, P, Q, R, process):\n # Update system state\n fx = lambda x, dt: process.forecast(x)\n # Expected measurement, given system state\n hx = lambda x: process.measure(x)\n # Get the sigma points for the unscented transformation\n # https://github.com/rlabbe/filterpy/blob/master/filterpy/kalman/sigma_points.py\n alpha, beta, kappa = 1e-3, 2, 0\n points = SigmaPoints(n=len(x), alpha=alpha, beta=beta, kappa=kappa)\n # Instantiate an Unscented Kalman Filter\n ukf = UKF(dim_x=len(x), dim_z=len(R[0]), dt=1, hx=hx, fx=fx, points=points)\n ukf.x, ukf.P, ukf.Q, ukf.R = x, P, Q, R\n # Return filter\n return ukf\n\n @staticmethod\n def get_unstable_wILI(region, ew1, ew2):\n weeks = Epidata.range(ew1, ew2)\n epidata = AF_Utils._get(Epidata.fluview(region, weeks, issues=ew2))\n data = [row['wili'] for row in epidata]\n if len(data) != flu.delta_epiweeks(ew1, ew2) + 1:\n raise Exception('missing data')\n return data\n\n 
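# --- Illustrative sketch (hypothetical, not part of the record above) ---
# get_sample_fits() turns bin scores into a PMF and samples bins by inverse-CDF
# lookup: cumulative sum plus np.searchsorted on a uniform draw. The sampling
# step in isolation:
import numpy as np

probs = np.array([0.1, 0.6, 0.3])      # PMF over three bins
cprob = np.cumsum(probs)               # -> [0.1, 0.7, 1.0]
draws = [np.searchsorted(cprob, u) for u in (0.05, 0.5, 0.95)]
print(draws)                           # -> [0, 1, 2]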
@staticmethod\n def get_national(regional):\n weights = [0.045286439944771467, 0.10177386656841922, 0.095681349146225586, 0.19610707945020625, 0.16310640558744591, 0.12488754783066998, 0.043916824425230531, 0.034124204104827027, 0.15298339758467921, 0.041244820532846248]\n return np.dot(weights, regional)\n\n @staticmethod\n def _signal(name, region, epiweek):\n rows = AF_Utils._get(Epidata.signals(secrets.api.signals, name, region, epiweek))\n if len(rows) != 1:\n raise Exception('expected one signal row')\n return rows[0]['value']\n\n @staticmethod\n def signal_twitter(region, epiweek):\n return AF_Utils._signal('twitter', region, epiweek)\n\n @staticmethod\n def signal_wiki(epiweek):\n return AF_Utils._signal('wiki', 'nat', epiweek)\n\n @staticmethod\n def signal_uili(region, epiweek):\n return AF_Utils._signal('uili', region, epiweek)\n\n\nclass Archefilter(Forecaster):\n\n # TODO: calculate backfill at runtime\n BF = {\n 'nat': [0.133, 0.104, 0.071, 0.064, 0.057, 0.048, 0.041, 0.031, 0.028, 0.023],\n 'hhs1': [0.173, 0.098, 0.083, 0.074, 0.066, 0.052, 0.044, 0.041, 0.036, 0.030],\n 'hhs2': [0.384, 0.247, 0.179, 0.143, 0.117, 0.086, 0.064, 0.053, 0.049, 0.044],\n 'hhs3': [0.268, 0.142, 0.106, 0.083, 0.072, 0.067, 0.062, 0.056, 0.052, 0.044],\n 'hhs4': [0.160, 0.076, 0.051, 0.044, 0.039, 0.031, 0.030, 0.029, 0.024, 0.023],\n 'hhs5': [0.159, 0.087, 0.071, 0.066, 0.061, 0.056, 0.051, 0.044, 0.037, 0.036],\n 'hhs6': [0.239, 0.217, 0.096, 0.086, 0.065, 0.054, 0.053, 0.045, 0.041, 0.036],\n 'hhs7': [0.255, 0.190, 0.124, 0.098, 0.072, 0.050, 0.037, 0.024, 0.023, 0.021],\n 'hhs8': [0.160, 0.140, 0.130, 0.122, 0.121, 0.114, 0.110, 0.103, 0.098, 0.093],\n 'hhs9': [0.679, 0.573, 0.446, 0.409, 0.378, 0.320, 0.267, 0.195, 0.170, 0.132],\n 'hhs10': [0.371, 0.299, 0.250, 0.227, 0.210, 0.201, 0.188, 0.189, 0.186, 0.184],\n }\n\n def __init__(self, test_season, locations, num_samples):\n super().__init__('fc-archefilter', test_season, locations)\n self.archetypes = {}\n self.num_samples = num_samples\n\n def run(self, epiweek):\n process = FluProcess(self.archetypes)\n # timing\n ew0 = flu.join_epiweek(self.test_season, 30)\n ew1 = flu.add_epiweeks(ew0, 52)\n num_weeks = flu.delta_epiweeks(ew0, epiweek) + 1\n # setup each region\n _x, _P = [], []\n _Q = [0.5 ** 2] * 10\n _R = [0.7 ** 2] * 11 + [0.5 ** 2] + [0.5 ** 2] * 11\n for region in AF_Utils.regions:\n # get unstable ili up until now\n wili = AF_Utils.get_unstable_wILI(region, ew0, epiweek)\n if len(wili) != num_weeks:\n raise Exception('missing data')\n # remove holiday effect\n wili = np.array(wili) * self.archetypes[region].holiday[:len(wili)]\n # TODO: use an actual backfill model\n bf_var = Archefilter.BF[region][::-1]\n while len(bf_var) < len(wili):\n bf_var = [bf_var[0]] + bf_var\n while len(bf_var) > len(wili):\n bf_var = bf_var[1:]\n bf_var = np.array(bf_var)\n # setup the flu process\n process.inform(region, wili, bf_var)\n # UKF data\n _x.append(wili[-1])\n _P.append(bf_var[-1])\n # set up the UKF\n x = np.array(_x)\n P = np.diag(_P)\n Q = np.diag(_Q)\n R = np.diag(_R)\n ukf = AF_Utils.initialize_filter(x, P, Q, R, process)\n # make it happen\n print(' [AF] state:', ukf.x)\n # predict next week's wILI\n ukf.predict()\n print(' [AF] state:', ukf.x)\n # measure digitial surveillance signals\n ew = flu.add_epiweeks(epiweek, 1)\n twitter, wiki, uili = [], [], []\n for region in ['nat'] + AF_Utils.regions:\n twitter.append(AF_Utils.signal_twitter(region, ew))\n if region == 'nat':\n wiki.append(AF_Utils.signal_wiki(ew))\n 
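# --- Illustrative sketch (hypothetical, not part of the record above) ---
# initialize_filter() wires filterpy's MerweScaledSigmaPoints into an
# UnscentedKalmanFilter with custom fx/hx. The same wiring on a trivial 1-D
# random walk with a direct observation (assumes filterpy is installed):
import numpy as np
from filterpy.kalman import MerweScaledSigmaPoints, UnscentedKalmanFilter

points = MerweScaledSigmaPoints(n=1, alpha=1e-3, beta=2, kappa=0)
ukf = UnscentedKalmanFilter(dim_x=1, dim_z=1, dt=1,
                            fx=lambda x, dt: x,   # state persists between steps
                            hx=lambda x: x,       # state is observed directly
                            points=points)
ukf.x = np.array([0.0])
ukf.R *= 0.5   # measurement noise
ukf.Q *= 0.1   # process noise

for z in (1.0, 1.2, 0.9):
    ukf.predict()
    ukf.update(np.array([z]))
print(ukf.x)  # estimate pulled toward the measurements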
uili.append(AF_Utils.signal_uili(region, ew))\n measurement = np.array(twitter + wiki + uili)\n print(' [AF] measurement:', measurement)\n ukf.update(measurement)\n print(' [AF] state:', ukf.x)\n # update the process with the latest estimate\n for (i, region) in enumerate(AF_Utils.regions + ['nat']):\n # get unstable ili up until now\n wili = AF_Utils.get_unstable_wILI(region, ew0, epiweek)\n if len(wili) != num_weeks:\n raise Exception('missing data')\n # remove holiday effect\n wili = np.array(wili) * self.archetypes[region].holiday[:len(wili)]\n # TODO: use an actual backfill model\n bf_var = Archefilter.BF[region][::-1]\n while len(bf_var) < len(wili):\n bf_var = [bf_var[0]] + bf_var\n while len(bf_var) > len(wili):\n bf_var = bf_var[1:]\n bf_var = np.array(bf_var)\n # add in the filter state\n if region == 'nat':\n national = AF_Utils.get_national(ukf.x)\n # TODO: what is national variance?\n x = np.mean(np.diag(ukf.P))\n est_mean = np.hstack((wili, np.array([national])))\n est_var = np.hstack((bf_var, np.array([x])))\n else:\n est_mean = np.hstack((wili, np.array([ukf.x[i]])))\n est_var = np.hstack((bf_var, np.array([ukf.P[i][i]])))\n process.inform(region, est_mean, est_var)\n self.process = process\n\n def _train(self, region):\n # get the data and build the archetype\n train_seasons = [season for season in range(2004, self.test_season) if season not in (2008, 2009)]\n curves = [AF_Utils.get_season(season, region) for season in train_seasons]\n self.archetypes[region] = Archetype(curves, baseline=0)\n\n def _forecast(self, region, epiweek):\n if region == 'nat':\n self.run(epiweek)\n # use the process for each region to get sample curves\n curves, grid = self.process.get_sample_fits(region, self.num_samples, True)\n #if region == 'nat':\n # import pylab as plt\n # for c in curves[:25]:\n # plt.plot(c, color='#888888', linewidth=1)\n # ew0 = flu.join_epiweek(self.test_season, 30)\n # wili = AF_Utils.get_unstable_wILI(region, ew0, epiweek)\n # plt.plot(wili, color='#000000', linewidth=2)\n # plt.show()\n #raise Exception()\n return [curve[10:43] for curve in curves]\n","repo_name":"cmu-delphi/flu-contest","sub_path":"src/archefilter/fc_archefilter.py","file_name":"fc_archefilter.py","file_ext":"py","file_size_in_byte":14306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12041109385","text":"import os\r\nimport cv2\r\nimport random\r\nimport shutil\r\nfrom shutil import copy\r\n\r\n\r\ndef newloader_FEN(path, namep, final, round):\r\n if os.path.exists(path+'/%s'% round):\r\n\r\n cv2.imwrite(path + '/%s/' % round + '%s.png' % namep, final)\r\n else:\r\n os.makedirs(path + '/%strainlabel' % round)\r\n os.makedirs(path + '/%svallabel' % round)\r\n os.makedirs(path + '/%strainimg' % round)\r\n os.makedirs(path + '/%svalimg' % round)\r\n os.makedirs(path + '/%s' % round)\r\n os.makedirs(path + '/%sFCSSNpth' % round)\r\n os.makedirs(path + '/%sFCSSNout' % round)\r\n cv2.imwrite(path+'/%s/'%round + '%s.png' % namep, final)\r\n\r\ndef train_loader1(input, round):\r\n\r\n trainfiles = input + '%s'%round\r\n ai = os.listdir(trainfiles).copy()\r\n\r\n num_train = len(ai)\r\n\r\n index_list = list(range(num_train))\r\n\r\n random.shuffle(index_list)\r\n num = 0\r\n trainDir = input + '\\\\%strainlabel\\\\'%round\r\n\r\n validDir = input + '\\\\%svallabel\\\\'%round\r\n\r\n files1 = os.listdir(trainfiles).copy()\r\n for i in range(len(files1)):\r\n fileName = os.path.join(input+'%s\\\\'%round, str(files1[i]))\r\n\r\n\r\n if num < 
num_train*0.7:\r\n\r\n copy(fileName, trainDir + str(files1[i]))\r\n else:\r\n copy(fileName, validDir + str(files1[i]))\r\n num += 1\r\n\r\n return trainDir, validDir\r\n\r\ndef label_to_img(imgDir, trainDir, validDir):\r\n files1 = os.listdir(trainDir).copy()\r\n for i in range(len(files1)):\r\n img = cv2.imread(imgDir + str(files1[i]))\r\n\r\n cv2.imwrite(trainDir[:-11] + 'trainimg\\\\'+ str(files1[i]), img)\r\n\r\n files2 = os.listdir(validDir).copy()\r\n for j in range(len(files2)):\r\n img1 = cv2.imread(imgDir + str(files2[j]))\r\n cv2.imwrite(validDir[:-9] + 'valimg\\\\' + str(files2[j]), img1)\r\n\r\n\r\n\r\n","repo_name":"fjc1575/Marine-Aquaculture","sub_path":"IDUDL/train_val_dataloader.py","file_name":"train_val_dataloader.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31535517779","text":"import requests\nimport os\nimport json\n\n\ndef get_files_list():\n url = \"https://slack.com/api/files.list\"\n files = []\n count = 1000\n slack_res = requests.get(url, params = {\"token\": os.environ[\"SLACK_TOKEN\"], \"count\" : count}).json()\n if slack_res[\"ok\"]:\n timestamp = [file.get(\"timestamp\") for file in slack_res[\"files\"]]\n file_id = [file.get(\"id\") for file in slack_res[\"files\"]]\n for i in range(len(slack_res[\"files\"])):\n files.append({\"timestamp\": timestamp[i], \"file_id\": file_id[i]})\n return files\n else:\n return -1\n","repo_name":"nitoc-ict/slack-FileRemove","sub_path":"files_list.py","file_name":"files_list.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"14183713214","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Author: Siva Prasad Varma\n\nimport csv\nfrom heapq import heappush, heappop\nimport doctest\n\n\n# Constants\ninf = float('inf')\n\ndef solve(W):\n \"\"\"\n >>> W = [[131, 673, 234, 103, 18], [201, 96, 342, 965, 150],\n ... [630, 803, 746, 422, 111], [537, 699, 497, 121, 956],\n ... 
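# --- Illustrative sketch (hypothetical, not part of the records above) ---
# train_loader1() above shuffles index_list but never uses it, so its 70/30
# split follows the directory listing order. A minimal random filename split
# with only the stdlib:
import random

files = [f"img_{i}.png" for i in range(10)]
shuffled = files[:]
random.shuffle(shuffled)
cut = int(len(shuffled) * 0.7)
train, valid = shuffled[:cut], shuffled[cut:]
print(len(train), len(valid))  # -> 7 3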
[805, 732, 524, 37, 331]]\n >>> solve(W)\n Minimum Sum path cost: 2297\n \"\"\"\n # setup dijkstra\n h, w = len(W), len(W[0])\n D, P = dijkstra(W, (0,0))\n print('Minimum Sum path cost: {}'.format(D[(h-1, w-1)]))\n\ndef dijkstra(W, s):\n # Dijkstra algorithm for single source shortest path\n h, w = len(W), len(W[0])\n D, P, Q, S = {s:W[s[0]][s[1]]}, {}, [(W[0][0], s)], set()\n while Q:\n _, u = heappop(Q) # Node with lowest estimate\n if u in S: continue\n S.add(u)\n for v in neighbours(u, h, w):\n relax(W, u, v, D, P)\n heappush(Q, (D[v], v))\n return D, P\n\ndef neighbours(u, h, w):\n x, y = u\n if x > 0:\n yield (x-1, y)\n if x+1 < h:\n yield (x+1, y)\n if y > 0:\n yield (x, y-1)\n if y +1 < w:\n yield (x, y+1)\n\n\ndef relax(W, u, v, D, P):\n d = D.get(u, inf) + W[v[0]][v[1]] # cost of edge is dependent only on v\n if d < D.get(v, inf):\n D[v], P[v] = d, u\n return True\n\nif __name__ == '__main__':\n doctest.testmod()\n # read the matrix from text file\n W = []\n with open(r'..\data\p083_matrix.txt') as f:\n reader = csv.reader(f)\n for row in reader:\n W.append(list(map(int, row)))\n print('h, w = {}, {}'.format(len(W), len(W[0])))\n solve(W)\n","repo_name":"sivapvarma/projecteuler","sub_path":"python/p083.py","file_name":"p083.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18516165346","text":"# Binary Search\ndef solution(n, times):\n answer = 0\n left = min(times)\n right = max(times)*n # worst case: all n people are examined by the slowest examiner\n while left <= right:\n mid = (left+right)//2\n checked = 0\n for time in times:\n checked += mid//time # checked: number of people all examiners can process in mid minutes\n if checked >= n: # stop early once at least n people can be processed within mid minutes \n break\n if checked >= n: # at least as many people processed as need to be examined (n)\n answer = mid\n right = mid-1\n elif checked < n: # fewer people processed than need to be examined (n)\n left = mid+1\n return answer\n\n# naive implementation -> O(n^2)\ndef first_try(n, times):\n q = list()\n for time in times:\n schedule = [time*(i+1) for i in range(n)]\n q.extend(schedule)\n return sorted(q)[n-1]\n\n# O(n^2)\ndef second_try(n, times):\n i = 1\n while n:\n for time in times:\n if not i%time: n-=1\n if not n: return i\n i += 1\n\n# uses a min heap, but still O(n^2)\nimport heapq\ndef third_try(n, times):\n heap = list()\n for i in range(n):\n for time in times:\n heapq.heappush(heap, time*(i+1))\n res = heapq.heappop(heap)\n return res","repo_name":"hanqpark/coding_test","sub_path":"programmers/고득점 Kit/coding_test/binary_search/입국심사.py","file_name":"입국심사.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22232967846","text":"import numpy as np\nimport numpy.ma as ma\nfrom scipy import ndimage as ndi\nfrom metpy.units import check_units, concatenate, units\nfrom metpy.calc import wind_direction, wind_speed, wind_components\n\ndef grad_mask_arcalg(REFmasked,REF,storm_relative_dir,ZDRmasked1,CC):\n #Inputs,\n #REFmasked: REF masked below 20 dBz\n #REF: 1km Reflectivity grid\n #storm_relative_dir: Vector direction along the reflectivity gradient in the forward flank\n #ZDRmasked1: 1km Differential Reflectivity (Zdr) grid, masked below 20 dBz reflectivity\n #CC: 1km Correlation Coefficient (CC) grid\n print('Gradient Analysis and Masking')\n #Determining gradient direction and 
masking some Zhh and Zdr grid fields\n\n smoothed_ref1 = ndi.gaussian_filter(REFmasked, sigma = 2, order = 0)\n REFgradient = np.asarray(np.gradient(smoothed_ref1))\n REFgradient[0,:,:] = ma.masked_where(REF < 20, REFgradient[0,:,:])\n REFgradient[1,:,:] = ma.masked_where(REF < 20, REFgradient[1,:,:])\n grad_dir1 = wind_direction(REFgradient[1,:,:] * units('m/s'), REFgradient[0,:,:] * units('m/s'))\n grad_mag = wind_speed(REFgradient[1,:,:] * units('m/s'), REFgradient[0,:,:] * units('m/s'))\n grad_dir = ma.masked_where(REF < 20, grad_dir1)\n\n #Get difference between the gradient direction and the FFD gradient direction calculated earlier\n srdir = storm_relative_dir\n srirad = np.copy(srdir)*units('degrees').to('radian')\n grad_dir = grad_dir*units('degrees').to('radian')\n grad_ffd = np.abs(np.arctan2(np.sin(grad_dir-srirad), np.cos(grad_dir-srirad)))\n grad_ffd = np.asarray(grad_ffd)*units('radian')\n grad_ex = np.copy(grad_ffd)\n grad_ffd = grad_ffd.to('degrees')\n\n #Mask out areas where the difference between the two is too large and the ZDR is likely not in the forward flank\n ZDRmasked2 = ma.masked_where(grad_ffd > 120 * units('degrees'), ZDRmasked1)\n ZDRmasked = ma.masked_where(CC < .60, ZDRmasked2)\n\n #Add a fill value for the ZDR mask so that contours will be closed\n ZDRmasked = ma.filled(ZDRmasked, fill_value = -2)\n\n #Returning variables,\n #grad_mag: Magnitude of the reflectivity gradient (computed with MetPy's wind_speed helper)\n #grad_ffd: Angle (degrees) used to indicate angular region of supercell containing the forward flank\n #ZDRmasked: Masked array ZDRmasked1 in regions outside the forward flank (grad_ffd) and below 0.6 CC\n return grad_mag,grad_ffd,ZDRmasked","repo_name":"mwilson14/ZDRArcAlgorithm","sub_path":"gradient_section_arcalg.py","file_name":"gradient_section_arcalg.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"26729308145","text":"from numpy import log2, ceil\nfrom numpy.random import uniform\nfrom random import choice, randrange\nfrom operator import itemgetter\nfrom matplotlib import pyplot as plt\n\n\ndef main():\n # The input data can be set via the Environment object's parameters\n env = Environment(\n fitness='x**3+3 * x**2 - 4*x + 7',\n pop_size=10, domain=(-4, 3), iterations=5000, precision=10)\n env.train()\n\n\nclass Environment:\n def __init__(\n self,\n pop_size=20, # population size\n domain=(-1, 2), # domain\n fitness='-x**2 + x + 2', # function to maximize\n precision=6, # precision p\n crossover_prob=0.25, # crossover rate\n mutation_prob=0.05, # mutation rate per gene\n iterations=50): # iterations\n self.pop_size = pop_size\n self.domain = domain\n self.fitness = compile(fitness, __name__, 'eval')\n self.precision = precision\n self.crossover_prob = crossover_prob\n self.mutation_prob = mutation_prob\n self.iterations = iterations\n # compute the length of the binary representation\n self.length = int(ceil(log2((domain[1] - domain[0]) * 10**precision)))\n\n # evolution history for plotting\n self.first = True\n self.points = []\n self.mean_points = []\n\n def train(self):\n # initial generation\n self.genesis()\n for iteration in range(self.iterations):\n # compute probabilities, survivors and mating\n probs = self.probabilities()\n intervals = partial_sums([0] + [v for k, v in 
probs.items()])\n survivors = self.select(intervals)\n mates = self.select_mates(survivors)\n\n if self.first:\n print(\"Survival probabilities\")\n for i, (k, v) in enumerate(probs.items()):\n print(f\"chromosome {i} p= {v:.5f}\")\n print()\n\n print(\"Survival probability intervals\")\n print(intervals)\n print()\n\n survivors = [list(probs.keys())[i] for i in survivors]\n\n if self.first:\n print(\"Survivors\")\n for i, v in enumerate(survivors):\n print(f\"{i}: {v}\")\n print()\n\n offspring = self.mate(mates, survivors)\n\n if self.first:\n print(\"Mating\")\n j = 0\n for i in range(0, len(mates), 2):\n if i + 1 >= len(mates):\n break\n print(f\"Mating between {mates[i]} and {mates[i+1]}\")\n print(f\"Offspring: {offspring[j][0]} and {offspring[j][1]}\")\n print()\n j += 1\n\n # The offspring may mutate\n for X, Y in offspring:\n X.mutate(self.mutation_prob)\n X.mutate_by_inversion(self.mutation_prob)\n Y.mutate(self.mutation_prob)\n Y.mutate_by_inversion(self.mutation_prob)\n\n j = 0\n for i in range(0, len(mates), 2):\n if i + 1 >= len(mates):\n break\n survivors[mates[i]] = offspring[j][0]\n survivors[mates[i+1]] = offspring[j][1]\n j += 1\n\n # Add the best individual to the new generation\n # (elitist selection)\n survivors = [\n list(self.population.keys())[self.fittest()]\n ] + survivors\n\n self.population = {X: self.evaluate(X) for X in survivors}\n\n if self.first:\n print(\"Next generation after mutation\")\n for i, (k, v) in enumerate(self.population.items()):\n print(f\"{i}: {k} f= {v:.5f}\")\n print()\n\n if self.first:\n print(\"Maximum and mean performance\")\n\n best = list(self.population.items())[self.fittest()]\n self.points.append(self.evaluate(best[0])) # record max performance so the plot below has data\n\n mean = sum([v for k, v in self.population.items()]) / len(self.population.items())\n self.mean_points.append(mean)\n\n print(f\"Max {best} Mean {mean}\")\n\n self.first = False\n\n # Evolution plots\n plt.title('Maximum performance')\n plt.plot(list(range(len(self.points))), self.points)\n plt.xlabel('Generations')\n plt.ylabel('Performance')\n plt.show()\n\n plt.title('Mean performance')\n plt.plot(list(range(len(self.mean_points))), self.mean_points)\n plt.xlabel('Generations')\n plt.ylabel('Performance')\n plt.show()\n\n def genesis(self):\n # Create chromosomes with random configurations\n self.population = [\n Chromosome(\n random_string('01', self.length),\n self.domain) for _ in range(self.pop_size)\n ]\n self.population = {X: self.evaluate(X) for X in self.population}\n\n if self.first:\n print(\"Initial generation\")\n for i, (k, v) in enumerate(self.population.items()):\n print(f\"{i}: {k} f= {v:.5f}\")\n print()\n\n def evaluate(self, X):\n return eval(self.fitness, {'x': X.value()})\n\n def fittest(self):\n # Find the fittest individual\n return list(self.population.keys()).index(max(\n self.population.items(),\n key=itemgetter(1)\n )[0])\n\n def probabilities(self):\n # Survival probabilities\n total = sum([v for k, v in self.population.items()])\n return {\n X: (value / total) for X, value in self.population.items()\n }\n\n def select(self, intervals):\n # Random selection\n return [\n interval_search(\n uniform(),\n intervals\n ) for _ in range(self.pop_size)\n ]\n\n def select_mates(self, survivors):\n # Generate the mating pairs\n return [\n i for i in range(len(survivors)) if uniform() < self.crossover_prob\n ]\n\n def mate(self, mates, pop):\n # Generate new offspring\n offspring = []\n for i in range(0, len(mates), 2):\n if i + 1 >= 
len(mates):\n break\n offspring.append(pop[mates[i]] + pop[mates[i+1]])\n return offspring\n\n\ndef partial_sums(lst):\n sums = lst.copy()\n for i in range(1, len(lst)):\n sums[i] += sums[i-1]\n return sums\n\n\ndef random_string(chars, length):\n return ''.join(choice(chars) for _ in range(length))\n\n# Cautare binara in intervale\ndef interval_search(u, intervals):\n left, right = 0, len(intervals)\n\n while left <= right:\n mid = (left + right) // 2\n if (mid + 1 < len(intervals) and\n intervals[mid] <= u and u < intervals[mid + 1]):\n return mid\n elif u < intervals[mid]:\n right = mid - 1\n elif mid + 1 < len(intervals) and u >= intervals[mid + 1]:\n left = mid + 1\n\n return None\n\n\nclass Chromosome:\n def __init__(self, bits, domain):\n self.bits = bits\n self.x_10 = int(bits, 2)\n self.domain = domain\n self.length = len(bits)\n\n def value(self):\n # Transformare reprezentare binara -> numar\n return ((self.domain[1] - self.domain[0]) /\n (2**self.length - 1) * self.x_10 +\n self.domain[0])\n\n def mutate(self, prob):\n # Mutare aleatoare gene\n new_bits = [bit for bit in self.bits]\n for i in range(len(new_bits)):\n if uniform() < prob:\n if new_bits[i] == '1':\n new_bits[i] = '0'\n else:\n new_bits[i] = '1'\n self.bits = ''.join(new_bits)\n\n def mutate_by_inversion(self, prob):\n if uniform() < prob: return\n\n points = [randrange(0, self.length) for _ in range(2)]\n points = sorted(points)\n\n if points[0] == points[1]: return\n\n left = self.bits[:points[0]]\n middle = self.bits[points[0]:points[1]:-1]\n right = self.bits[points[1]:]\n\n self.bits = left + middle + right\n\n def __add__(self, rhs):\n # Imperechere cu o singura taietura\n cut = randrange(0, self.length)\n return (\n Chromosome(self.bits[:cut] + rhs.bits[cut:], self.domain),\n Chromosome(self.bits[cut:] + rhs.bits[:cut], self.domain)\n )\n\n def __repr__(self):\n return f\"{self.bits} x= {self.value():.5f}\"\n\n\nif __name__ == \"__main__\":\n # Teste\n assert Environment().length == 22\n assert (Chromosome(\"0000011101001001110001\", (-1, 2)).value() ==\n -0.9145920073013323)\n assert interval_search(0.3, [\n 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7\n ]) == 3\n assert partial_sums([1, 2, 3, 4]) == [1, 3, 6, 10]\n\n main()\n","repo_name":"JustBeYou/AlgAvan","sub_path":"gen/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19020400298","text":"import redis\r\nimport random\r\nimport json\r\nimport CalculateurFFT\r\nimport time\r\nr = redis.Redis(host=\"localhost\", port=6379, db=0)\r\n\r\n\r\nwhile True :\r\n for nom_radar in [\"Ard\", \"RPi_1\", \"RPi_2\"] :\r\n echantillons = {\"I\" : [int(2000*random.random()) for _ in range(2000)],\r\n \"Q\" : [int(2000*random.random()) for _ in range(2000)]}\r\n dsp_radar = CalculateurFFT.calculerFFT(echantillons, 4, 2048, 40, 20, 1/100)\r\n associations = CalculateurFFT.associations_frequences(dsp_radar[\"FFT\"])\r\n dsp_radar[\"associations\"] = associations\r\n ################\r\n points_generes = [{\"d\" : 3*random.random(), \"v\" : 3*random.random()}]\r\n dsp_radar[\"associations\"][\"points\"][\"points_potentiels\"] = points_generes\r\n ################\r\n r.set(\"dsp_\" + nom_radar, json.dumps(dsp_radar))\r\n #r.set(\"points_\" + nom_radar, json.dumps(points_generes))\r\n r.set(\"points_\" + nom_radar, json.dumps([\r\n {\"d\" : 3*random.random(), \"v\" : 3*random.random()},\r\n {\"d\" : 3*random.random(), \"v\" : 3*random.random()}, 
\r\n {\"d\" : 3*random.random(), \"v\" : 3*random.random()}\r\n ])\r\n )\r\n time.sleep(0.05)\r\n\r\n\r\n","repo_name":"madaaaaaaaaa/ProjetSysteme","sub_path":"generateurRadars.py","file_name":"generateurRadars.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35229560276","text":"#! /usr/bin/env python3\n\nfrom random import randint\n\ndef main():\n print(\"{\")\n nVar = 5000\n for i in range(nVar):\n print(\"int variable_\" + str(i) + \" = \" + str(randint(0, 1000000)) + \";\")\n print(\"}\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"samuel-thiken/Compiler","sub_path":"src/test/script/generators/big_tests.py","file_name":"big_tests.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19635380111","text":"#currency converted\n\nimport re\n\ncurrencies = {\n 'USD' : 1,\n 'EUR' : 1.09,\n 'GBP' : 1.27\n}\n\nprint(\"Available currencies: USD, EUR, GBP\")\ncash_query = input(\"Enter what currency you wish to convert to, your current currency and how much of the currency you have: \")\n\nfragmented = cash_query.split()\ntarget, given, amount = fragmented\n\nif given in currencies and target in currencies:\n rate = currencies[given] / currencies[target]\n converted_amount = float(amount) * rate\n print(f\"{amount} {given} is equal to {converted_amount} {target}\")\nelse:\n print(\"error\")","repo_name":"Jmclark3592/python_learning","sub_path":"CurrencyConverter.py","file_name":"CurrencyConverter.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2729940522","text":"'''\nWord Break 1\n\n'''\n\n\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n memo = collections.defaultdict(list)\n wordDict = set(wordDict)\n\n def helper(word):\n\n if not word:\n return True\n\n if word in memo:\n return memo[word]\n\n for end in range(1, len(word) + 1):\n current_word = word[:end]\n if current_word in wordDict:\n val = helper(word[end:])\n memo[word] = val\n if val:\n return True\n\n return False\n\n return helper(s)\n\n'''\nWord Break 2\n\n'''\n\n\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:\n\n memo = collections.defaultdict(list)\n wordDict = set(wordDict)\n\n def find_sentences(sentence):\n\n if not sentence:\n return [[]]\n\n if sentence in memo:\n return memo[sentence]\n\n for sentence_end in range(1, len(sentence) + 1):\n current_word = sentence[:sentence_end]\n if current_word in wordDict:\n sentence_breaks = find_sentences(sentence[sentence_end:])\n\n for sentence_break in sentence_breaks:\n memo[sentence].append([current_word] + sentence_break)\n\n return memo[sentence]\n\n find_sentences(s)\n return [\" \".join(sentence) for sentence in memo[s]]\n","repo_name":"shubhamadep/Interview-Coding-Practice","sub_path":"wordBreak1&2.py","file_name":"wordBreak1&2.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5042867256","text":"from django.conf.urls import patterns, include, url\nfrom golf_league import settings\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\nhandler404 = 'golf.views.custom_404'\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 
'golf_league.views.home', name='home'),\n # url(r'^golf_league/', include('golf_league.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n url(r'^golf/', include('golf.urls')),\n # url(r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),\n)\nif not settings.DEBUG:\n urlpatterns += patterns('',\n (r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),\n )","repo_name":"gojonesy/golf","sub_path":"golf_league/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43946330345","text":"possible_combinations = [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n \"q\",\n \"r\",\n \"s\",\n \"t\",\n \"u\",\n \"v\",\n \"w\",\n \"x\",\n \"y\",\n \"z\",\n]\n\n\ndef get_all_alphabet_combinations_list(length):\n print(f\"making list of length {length}\")\n result = []\n while length > 0:\n if len(result) == 0:\n result = possible_combinations\n else:\n new_result = []\n for i, v in enumerate(result):\n for i2, v2 in enumerate(possible_combinations):\n new_result.append(v + v2)\n result = new_result\n length -= 1\n return result\n\n\ndef has_all_alphabet_combinations(string):\n for _, v1 in enumerate(possible_combinations):\n for _, v2 in enumerate(possible_combinations):\n if v1 + v2 not in string:\n return False\n return True\n\n\nsearch_string_lower_bound = 2 * 26 # 26 letters, 2 times each\nsearch_string_upper_bound = 2 * 26 * 26 # 26 letters, 2 times each, 26 combinations\n\nfor i in range(search_string_lower_bound, search_string_upper_bound):\n candidates = get_all_alphabet_combinations_list(i)\n for _, v in enumerate(candidates):\n print(v)\n if has_all_alphabet_combinations(v):\n print(\"Found it: \", v)\n exit(0)\n","repo_name":"anaclumos/the-library-of-babel","sub_path":"experiment3.py","file_name":"experiment3.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"36042408425","text":"import json\nimport os\nfrom pathlib import Path\nfrom random import random\n\nfrom fastapi import FastAPI, HTTPException\nfrom fastapi import status\nfrom fastapi.responses import JSONResponse\n\nDATA_DIR = Path(os.path.dirname(__file__)) / \"data\"\nFAIL_RATE = int(os.getenv(\"FAIL_RATE\", 0))\n\n\napp = FastAPI()\n\n\n@app.middleware(\"http\")\nasync def controlled_fail_middleware(request, call_next):\n if FAIL_RATE / 100 > random():\n return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)\n return await call_next(request)\n\n\ndef read_data(file_):\n return json.load(open(file_))\n\n\n@app.get(\"/catalogs\")\nasync def catalog_list():\n files = DATA_DIR.glob(\"*.json\")\n return [read_data(f_) for f_ in files]\n\n\n@app.get(\"/catalogs/{code}\")\nasync def catalog_retrieve(code):\n try:\n return read_data(DATA_DIR / f\"{code}.json\")\n except FileNotFoundError:\n raise 
HTTPException(status_code=status.HTTP_404_NOT_FOUND)\n","repo_name":"jeanmask/pybr21-fake-catalog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12499509509","text":"from typing import Any, Hashable, List, Union\nfrom mmdict.multidict import MultiDict\n\nclass CaselessMultiDict(MultiDict):\n    def __init__(self, *args, **kwargs):\n        # A mapping of the {storage key -> supplied key}\n        # Keys that are not transformed are not stored.\n        self.storage_to_external = {}\n\n        super().__init__(*args, **kwargs)\n\n    # New internal helpers for case insensitivity and external\n    # key mapping tracking.\n    def __casefold_when_possible(self, key: Union[str, Any]) -> Union[str, Any]:\n        '''\n        Call `key.casefold()` if key is a string, otherwise\n        return the input unchanged.\n        '''\n        if isinstance(key, str):\n            return key.casefold()\n        return key\n\n    def __try_to_forget_external_name(self, storage_key):\n        '''\n        Attempt to forget an external name for a storage key if it no longer has\n        a value in the store, and has no remaining aliases defined.\n        Otherwise, do nothing.\n        '''\n        remaining_aliases = self.storage_to_aliases.get(storage_key, set())\n        has_data = storage_key in self.value_store\n        if not len(remaining_aliases) and not has_data:\n            if storage_key in self.storage_to_aliases:\n                del self.storage_to_aliases[storage_key]\n\n    # Key mapping to and from presentation and storage\n    def _to_storage_key(self, key):\n        return self.__casefold_when_possible(\n            super()._to_storage_key(key)\n        )\n\n    def _to_internal_alias(self, key):\n        return self.__casefold_when_possible(\n            super()._to_internal_alias(key)\n        )\n\n    def _to_external_key(self, key):\n        not_found = KeyError(\"not found\")\n        external = self.storage_to_external.get(key, not_found)\n        if external != not_found:\n            return external\n        return super()._to_external_key(key)\n\n    # Protocol methods to keep track of representation and storage keys\n    def alias(self, canonical: Hashable, aliases: List[Hashable]):\n        storage_key = self._to_storage_key(canonical)\n        self.storage_to_external.setdefault(storage_key, canonical)\n        return super().alias(canonical, aliases)\n\n    def unalias(self, alias: Hashable) -> bool:\n        storage_key = self._to_storage_key(alias)\n        result = super().unalias(alias)\n        self.__try_to_forget_external_name(storage_key)\n        return result\n\n    def __setitem__(self, key, value):\n        # We do not need to track the given names of aliases\n        if not self.is_alias(key):\n            storage_key = self._to_storage_key(key)\n            # We only need to track the first given value for a key when:\n            # * It is not already defined as a destination by an alias declaration.\n            # * Only if we transform the key\n            # * We have not used the storage slot before\n            has_value = storage_key in self.value_store\n            is_destination = storage_key in self.storage_to_aliases\n            if not is_destination and not has_value:\n                self.storage_to_external[storage_key] = key\n\n        return super().__setitem__(key, value)\n\n    def __delitem__(self, key):\n        result = super().__delitem__(key)\n        storage_key = self._to_storage_key(key)\n        self.__try_to_forget_external_name(storage_key)\n        return result\n","repo_name":"sshirokov/mmdict","sub_path":"src/mmdict/caseless.py","file_name":"caseless.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"2534945111","text":"import os.path\nfrom fastapi import FastAPI, 
File\nimport face_recognition\nfrom file import create_file, face_distance_to_conf\nimport uuid\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseModel\n\n\nclass ComparisonResponse(BaseModel):\n # is_same: bool\n percentage: float\n\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.post(\"/\", response_model=ComparisonResponse)\ndef read_root(cni: bytes = File(...), img: bytes = File(...)):\n print(\"**** Request ****\")\n img1_location = create_file(cni, str(uuid.uuid1()))\n face1 = face_recognition.load_image_file(img1_location)\n f1_face_encoding = face_recognition.face_encodings(face1)[0]\n\n img2_location = create_file(img, str(uuid.uuid1()))\n face2 = face_recognition.load_image_file(img2_location)\n f2_face_encoding = face_recognition.face_encodings(face2)[0]\n\n # result_bool: bool = face_recognition.compare_faces([f1_face_encoding], f2_face_encoding)[0]\n face_distance = face_recognition.face_distance([f1_face_encoding], f2_face_encoding)[0]\n\n if os.path.exists(img1_location):\n os.remove(img1_location)\n\n if os.path.exists(img2_location):\n os.remove(img2_location)\n\n result = {\n # \"is_same\": bool(result_bool),\n \"percentage\": face_distance_to_conf(face_distance)*100\n }\n\n return result\n","repo_name":"jules2m1998/face-recon-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40939031565","text":"from botocore.vendored import requests\nfrom datetime import datetime, timedelta, timezone\nimport json\nimport sys\nimport os.path\nimport time\n\nwith open('credentials.json', 'r') as f:\n creds = json.load(f)\n\nMETRA_AUTH = (creds['metraClient'], creds['metraSecret'])\nMETRA_URL = 'https://gtfsapi.metrarail.com/gtfs'\nSLACK_URL = 'https://hooks.slack.com/services/' + creds['slackHook']\nCALENDAR_URL = 'https://www.googleapis.com/calendar/v3/calendars/' + creds['googleCalendar']\nGOOGLE_KEY = creds['googleKey']\nRED = '#D00000'\nYELLOW = '#D0D000'\nGREEN = '#00D000'\nMAGENTA = '#FF00FF'\nTIME_FORMAT = '%H:%M'\n\ndelayed_template = '{adjust_time} arrival for {normal_time} {direction} train. 
({delay})'\nnormal_template = '{normal_time} {direction} train is arriving on time.'\n\ndef get(endpoint):\n resp = requests.get(METRA_URL + endpoint, auth=METRA_AUTH)\n return resp.json()\n\ndef pretty(obj):\n return json.dumps(obj, sort_keys=True, indent=4, separators=(',',': '))\n \ndef post_slack(title='', text='', color=MAGENTA):\n slack_payload = { \n 'username': 'Metra Updates',\n 'channel': '#metra',\n 'icon_emoji': ':steam_locomotive:',\n 'attachments': [\n {\n 'fallback': text,\n 'color': color,\n 'fields': [\n {\n 'title': title,\n 'value': text,\n 'short': False\n }\n ]\n }\n ]\n }\n requests.post(SLACK_URL, data=json.dumps(slack_payload), headers={'Content-Type': 'application/json'})\n\ndef load_input(path):\n with open(path,'r') as f:\n favorites = json.load(f)\n # TODO Add validation that input is the data expected.\n return favorites\n\nstop_times_path = 'stop_times.json'\ndef load_stop_times(local=True):\n global stop_times\n if local and os.path.isfile(stop_times_path):\n print('Using local stop times.')\n with open(stop_times_path, 'r') as f:\n stop_times = json.load(f)\n else:\n print('Fetching remote stop times.')\n stop_times = get('/schedule/stop_times')\n with open(stop_times_path, 'w') as f:\n json.dump(stop_times, f)\n\ndef find_trip_id(stop_id, stop_time):\n load_stop_times()\n arrival_time = stop_time + ':00' # add seconds\n for x in stop_times:\n if x['stop_id'] == stop_id and x['arrival_time'] == arrival_time: \n return x['trip_id']\n return None\n\ntrip_updates = None # reset to None when want to refresh\ndef get_delays(trip_id, stop_id):\n global trip_updates\n if trip_updates is None:\n trip_updates = get('/tripUpdates')\n for x in trip_updates:\n if x['id'] == trip_id:\n for y in x['trip_update']['stop_time_update']:\n if y['stop_id'] == stop_id:\n return y['arrival']['delay'] # arrival.time.low\n return 0\n\ndef lambda_handler(event, context):\n load_stop_times(local=True)\n now = datetime.utcnow()\n last_hour = now - timedelta(hours=3)\n next_hour = now + timedelta(hours=3)\n # next_day = datetime(now.year, now.month, now.day) + timedelta(days=1) # copy without time components\n params = {\n 'key': GOOGLE_KEY,\n 'timeMin': last_hour.isoformat('T') + 'Z',\n 'timeMax': next_hour.isoformat('T') + 'Z',\n 'singleEvents': True # expands recurring events into their own objects.\n }\n r = requests.get(CALENDAR_URL + '/events', params=params)\n for i in r.json()['items']:\n data = i['description'].split('\\n')\n trip_id = data[0]\n stop_id = data[1]\n for x in stop_times:\n if x['trip_id'] == trip_id and x['stop_id'] == stop_id:\n stop_time = x['arrival_time'][:-3]\n previous_arrival = datetime.strptime(i['start']['dateTime'] , '%Y-%m-%dT%H:%M:%S%z').replace(tzinfo=None)\n normal_arrival = datetime.strptime(stop_time, TIME_FORMAT).replace(year=previous_arrival.year, month=previous_arrival.month, day=previous_arrival.day)\n normal_time = stop_time\n direction = 'inbound' if normal_arrival.hour < 12 else 'outbound' # TODO Assumes all trains before noon are inbound, which isn't true.\n delay = get_delays(trip_id, stop_id)\n # delay = 260 # can hardcode delays here for testing\n delay_time = timedelta(seconds=abs(delay))\n adjust_arrival = normal_arrival + (delay_time * (-1 if delay < 0 else 1))\n adjust_time = adjust_arrival.strftime(TIME_FORMAT)\n difference = (adjust_arrival - previous_arrival).total_seconds()\n if difference != 0:\n # TODO Update Calendar Event with New Arrival Time\n color = YELLOW if difference < 0 else RED\n print(difference, delay)\n if delay 
== 0:\n color = GREEN\n text = normal_template.format(\n normal_time=normal_time,\n direction=direction,\n )\n else:\n text = delayed_template.format(\n normal_time=normal_time, \n direction=direction, \n adjust_time=adjust_time, \n delay=('+' if delay > 0 else '-') + str(delay_time)\n )\n post_slack(title=stop_id,text=text,color=color)\n \n return {\n 'statusCode': 200,\n }\n\n# This allows for easier local testing\nif __name__ == '__main__':\n print('Invoking local Lambda handler...')\n lambda_handler(None, None)\n","repo_name":"andykessler/MetraUpdates","sub_path":"poll.py","file_name":"poll.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15296845807","text":"from cherry_rl.algorithms.kl_divergence import kl_divergence\nfrom cherry_rl.algorithms.optimizers.model_optimizer import ModelOptimizer\n\n\nclass PolicyOptimizer(ModelOptimizer):\n \"\"\"\n Simple policy-model optimizer.\n\n It used by 'Behavioral Cloning from Observations'.\n Accepts distribution / action as target\n and minimize D_KL / MSE between it and policy.\n \"\"\"\n def __init__(\n self,\n policy_model,\n learning_rate=3e-4,\n clip_grad=0.5,\n entropy=0.0\n ):\n super().__init__(policy_model, learning_rate, clip_grad)\n self.entropy = entropy\n\n def _policy_loss(\n self,\n observations, target_policy, mask,\n target_deterministic\n ):\n policy, _, _ = self.model(observations, None)\n entropy = self.model.pi_distribution.entropy(policy)\n if target_deterministic:\n policy, _ = self.model.pi_distribution.sample(policy, deterministic=True)\n policy_loss = 0.5 * (policy - target_policy) ** 2\n policy_loss = policy_loss.mean(-1)\n else:\n # We want to move policy towards idm prediction.\n # I believe it corresponds to minimizing D_KL(idm, policy).\n policy_loss = kl_divergence(\n self.model.pi_distribution_str, target_policy, policy\n )\n\n policy_loss = self._average_loss(policy_loss, mask)\n entropy_loss = self._average_loss(entropy, mask)\n loss = policy_loss - self.entropy * entropy_loss\n loss_dict = {\n 'mse_policy_loss' if target_deterministic else 'd_kl_loss': policy_loss.item(),\n 'entropy': entropy_loss.item(),\n 'policy_loss': loss.item()\n }\n return loss, loss_dict\n\n def train(self, observations, target_policy, mask, target_deterministic=False):\n loss, result = self._policy_loss(observations, target_policy, mask, target_deterministic)\n policy_model_grad_norm = self.optimize_loss(loss)\n result.update({'policy_model_grad_norm': policy_model_grad_norm})\n return result\n","repo_name":"CherryPieSexy/imitation_learning","sub_path":"cherry_rl/algorithms/optimizers/bco/d_kl_policy_optimizer.py","file_name":"d_kl_policy_optimizer.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"81"} +{"seq_id":"71554315465","text":"# 🚨 Don't change the code below 👇\nprint(\"Welcome to the Love Calculator!\")\nname1 = input(\"What is your name? \\n\")\nname2 = input(\"What is their name? 
\\n\")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\nname1_lower = name1.lower()\nname2_lower = name2.lower()\n\ntrue = 0\nlove = 0\n\n#True\n#first name\ntrue += name1_lower.count(\"t\")\ntrue += name1_lower.count(\"r\")\ntrue += name1_lower.count(\"u\")\ntrue += name1_lower.count(\"e\")\n#second name\ntrue += name2_lower.count(\"t\")\ntrue += name2_lower.count(\"r\")\ntrue += name2_lower.count(\"u\")\ntrue += name2_lower.count(\"e\")\n\n#false\n#first name\nlove += name1_lower.count(\"l\")\nlove += name1_lower.count(\"o\")\nlove += name1_lower.count(\"v\")\nlove += name1_lower.count(\"e\")\n#second name\nlove += name2_lower.count(\"l\")\nlove += name2_lower.count(\"o\")\nlove += name2_lower.count(\"v\")\nlove += name2_lower.count(\"e\")\n\n\npercentage = str(true) + str(love)\nprint(f\"\\nTimes true {true}, times False {love}\")\nprint(f\"The percentage is {percentage}\")\n\nif int(percentage) <= 10 or int(percentage) >= 90:\n    print(f\"Your score is {percentage}, you go together like a coke and mentos\")\nelif int(percentage) >= 40 and int(percentage) <= 50:\n    print(f\"Your score is {percentage}, you're alright together\")\nelse:\n    print(f\"Your score is {percentage}.\\n\")","repo_name":"auscultarem/Python-Scripts","sub_path":"Love Calculator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"10008480652","text":"# This is a recursive algorithm, meaning that the function is called within itself\r\nfrom numba import jit\r\n# 7.31.23 - I added the numba module to speed up calculations. The effect is dramatic\r\n\r\n@jit(nopython=True)\r\ndef Fibonacci(n):\r\n    if n <= 1:\r\n        return n\r\n    else:\r\n        return (Fibonacci(n-1) + Fibonacci(n-2))\r\n\r\n# Asks the user for the length of the sequence, then prints the result of our algo\r\nwhile True:\r\n    n_terms = int(input(\"Length of Fibonacci sequence: \"))\r\n    for i in range(n_terms):\r\n        print(Fibonacci(i))\r\n","repo_name":"pdmIV/RecursiveFibonacci","sub_path":"Fibonacci Recursive.py","file_name":"Fibonacci Recursive.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"25306034528","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar  7 15:56:40 2020\r\n\r\n@author: cheerag.verma\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\nProblem : Given two strings, check if they are permutations of each other. Return true or false.\r\n          Permutation means - length of both the strings should be the same and should contain same set of characters.\r\n          Order of characters doesn't matter.\r\n\r\ninput : abcde\r\n        baedc\r\n\r\noutput: true\r\n\r\n\r\n\"\"\"\r\n\r\n\r\nstr1 = \"abcf\"\r\nstr2 = \"cbad\"\r\nif len(str1)!= len(str2):\r\n    print(\"false\")\r\n\r\nelse:\r\n    for i in str2:\r\n        if i not in str1:\r\n            print(\"false\")\r\n            break\r\n    else:\r\n        print(\"true\")","repo_name":"iamcheerag/Python-problems","sub_path":"Strings/assignment/Check Permutation.py","file_name":"Check Permutation.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"}
+{"seq_id":"74599750346","text":"import pylab as plt\nimport torch\n\nMU = torch.tensor([8.4637, 0.2108, 0])\nSTD = torch.tensor([44.9969, 37.0469, 1])\n\ndef plot_example(ax, strokes):\n    ax.axis('equal')\n    xs, ys = [], []\n    prev = 0, 0\n    for dx, dy, end in strokes:\n        x = dx + prev[0]\n        y = dy + prev[1]\n        xs.append(x)\n        ys.append(-y)\n        prev = x, y\n        if end:\n            ax.plot(xs, ys)\n            xs, ys = [], []","repo_name":"jzbontar/handwriting_synthesis","sub_path":"extern.py","file_name":"extern.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"23748804456","text":"# -*- coding:utf-8 -*-\nimport requests\nimport unittest\nfrom interface.add_order_test import AddOrderTest\n\n# 订单转签约接口\nclass OrderSigningTest (unittest.TestCase):\n\n    def setUp(self):\n        # 定义所有需要的URL链接\n        self.base_url = \"http://10.0.4.104:12008/ddxf/order\"\n        self.base_url_app_login = \"http://10.0.4.104:12008/ddxf/user/login\"\n        self.i = AddOrderTest()\n        self.i.setUp()\n        self.orderId = self.i.add_order()\n        self.base_url_signing = \"http://10.0.4.104:12008/ddxf/order/deal/status/%s\" % self.orderId\n        self.case_name = \" \"\n\n        # 定义请求参数\n        self.body_data = {\"password\": \"Fdd123@@\", \"username\": \"新体验项目助理\"}\n\n    def login_app(self):\n        '''登录多多新房APP'''\n        self.login = requests.post (self.base_url_app_login, json=self.body_data)\n        # 获取后台返回的token\n        self.a = self.login.json ()['data']\n        self.token = self.a['token']\n        return self.token\n\n    def test_signing_test(self):\n        '''签约操作流程'''\n        self.head = {\"Content-Type\": \"application/json\",\n                     \"version\": \"4.0.4\",\n                     \"apiVersion\": \"4.0.4\",\n                     \"platform\": \"iOS\",\n                     \"deviceId\": \"A3D2498C-E831-4CCF-BC08-964CE5F0D9EF\",\n                     \"platformVersion\": \"11.4\",\n                     \"token\": self.login_app (),\n                     \"userId\": \"408899\",\n                     \"ocUserId\": \"408899\"\n                     }\n\n        self.body_data_signing = {\"actionType\": 2, \"buildingNo\": \"A栋\", \"contractAmount\": 20000000,\n                                  \"contractArea\": \"66.00\", \"eventTime\": 1546072646199, \"flatId\": 125512,\n                                  \"houseId\": 54608, \"houseNo\": \"111\", \"houseResourceId\": 33698,\n                                  \"orderAttachmentInput\": {\n                                      \"contractImgUrls\": [\"https://fsupload.fangdd.net/image/U-ie650sSoCPrBNhhZs3RPvXIJY.jpg\"],\n                                      \"custIdCardImgUrls\": [], \"otherUrls\": [],\n                                      \"subcribeImgUrls\": [\"https://fsupload.fangdd.net/image/ipIorj4aQ-WnzaNlP7H6dOQNovM.jpg\"],\n                                      \"subcribeReceiptImgUrls\": [], \"type\": 20 },\n                                  \"orderNote\": \"小木鸟的签约操作\", \"packageId\": 501438,\"orderType\":2,\n                                  \"predictTime\": 1540224000000, \"salesAmount\": 10000000, \"unitNo\": \"1227001\"\n                                  }\n        self.r = requests.post (self.base_url_signing,json=self.body_data_signing,headers=self.head)\n        result = self.r.json()\n        print (result)\n        self.assertEqual(result['code'],200)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"slp520/AutoTest","sub_path":"interface/order_signing_test.py","file_name":"order_signing_test.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"40429922165","text":"#! /usr/bin/env python3\n# mapIt.py\n# command line or clipboard\n\nimport webbrowser, sys, pyperclip\nif len(sys.argv) > 1:\n    # Get address from the command line\n    address = ' '.join(sys.argv[1:])\nelse:\n    # Get address from the clipboard\n    address = pyperclip.paste()\nwebbrowser.open('https://google.com/maps/place/' + address)\n","repo_name":"Amelen0/Python-Projects","sub_path":"mapIt.py","file_name":"mapIt.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"38407146904","text":"# In[] Libs\r\nimport tkinter as tk\r\n\r\n# In[] Initializations\r\nwindow = tk.Tk()\r\nwindow.title(\"Working with Frames in TKinter\")\r\n\r\n# In[] Add frames and widgets\r\n# Assigning Widgets to Frames With Frame Widgets\r\nframe_a = tk.Frame()\r\nframe_b = tk.Frame()\r\n\r\nlabel_a = tk.Label(master=frame_a, text=\"I'm in Frame A\")\r\nlabel_a.pack()\r\n\r\nlabel_b = tk.Label(master=frame_b, text=\"I'm in Frame B\")\r\nlabel_b.pack()\r\n\r\nframe_a.pack()\r\nframe_b.pack()\r\n\r\n# %% Adjusting Frame Appearance With Reliefs\r\n\"\"\" \r\nRelief Attribute: (https://realpython.com/python-gui-tkinter/#working-with-widgets)\r\n- tk.FLAT creates a frame that appears to be flat.\r\n- tk.SUNKEN adds a border that gives the frame the appearance of being sunken into the window.\r\n- tk.RAISED gives the frame a border that makes it appear to protrude from the screen.\r\n- tk.GROOVE adds a border that appears as a sunken groove around an otherwise flat frame.\r\n- tk.RIDGE gives the appearance of a raised lip around the edge of the frame.\r\n\"\"\"\r\nborder_effects = {\r\n    \"flat\": tk.FLAT,\r\n    \"sunken\": tk.SUNKEN,\r\n    \"raised\": tk.RAISED,\r\n    \"groove\": tk.GROOVE,\r\n    \"ridge\": tk.RIDGE,\r\n}\r\n\r\nfor relief_name, relief in border_effects.items():\r\n    frame = tk.Frame(master=window, relief=relief, borderwidth=5)\r\n    frame.pack(side=tk.LEFT)\r\n    label = tk.Label(master=frame, text=relief_name)\r\n    label.pack()\r\n\r\n# In[] Run the App\r\nwindow.mainloop()\r\n","repo_name":"mustafa-sarshar/Python-GUIs","sub_path":"TKinter/03_widgets_frames.py","file_name":"03_widgets_frames.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"26246501561","text":"from flask import Flask, jsonify\nfrom pymongo import MongoClient\nfrom pymongo.errors import ServerSelectionTimeoutError\nimport time\n\napp = Flask(__name__)\nmongo = MongoClient('mongo', 27017,\n                    connectTimeoutMS=1000, socketTimeoutMS=1000, serverSelectionTimeoutMS=1000,\n                    appname='demo-app')\ndb = mongo.sample_database\ncollection = db.widgets\n\n\n@app.route('/', methods=['GET'])\ndef hello():\n    try:\n        count = collection.count()\n        return 'Hello!\\nThere are {} items in the database\\n'.format(count)\n    except ServerSelectionTimeoutError:\n        return \"Can't connect to MongoDB\\n\"\n\n@app.route('/add', methods=['POST'])\ndef add():\n    try:\n        collection.insert_one({\n            'timestamp': int(time.time())\n        })\n    except ServerSelectionTimeoutError:\n        return \"Can't connect to MongoDB\\n\"\n    return 'You have added an item to the database!\\n'\n\n\nif __name__ == '__main__':\n    
app.run(host='0.0.0.0', port=80, debug=True)\n","repo_name":"RameshEY/aws-kube-codesuite","sub_path":"sample-app/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"4219821410","text":"import numpy as np\nimport tensorflow as tf\n\nfrom MobileNet_v2 import MobileNetV2\nfrom keras.layers.core import Activation\nfrom keras.layers import Dense,GlobalAveragePooling2D\nfrom keras.models import Model\n\nimport time\n\nimport cv2\nfrom keras.preprocessing import image\n\ndef load_image(img,img_width=224,img_height=224):\n img = cv2.resize(img, (img_width, img_height))\n img_tensor = image.img_to_array(img) # (height, width, channels)\n img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)\n img_tensor /= 255. # imshow expects values in the range [0, 1]\n return img_tensor\n\n# model = tf.keras.applications.MobileNetV2(\n# weights=\"imagenet\", input_shape=(224, 224, 3))\n\nweightPath = \"./weight.h5\"\nalpha_MNv2 = 0.5\ntest_image = './images/videof11.1obj1.jpg'\n\ndef loadMNv2(weightPath,alpha_MNv2):\n '''loading MobileNet_v2 model'''\n start_time_load = time.time()\n print('loading the MobileNet_v2 model...')\n\n classNum = 2\n img_width, img_height = 224,224\n\n base_model = MobileNetV2(input_shape=(img_width, img_height,3),\n alpha=alpha_MNv2,\n include_top=False,\n weights= None\n )\n\n x=base_model.output\n x=GlobalAveragePooling2D()(x)\n x=Dense(1024,activation='relu')(x)\n x=Dense(1024,activation='relu')(x)\n x=Dense(512,activation='relu')(x)\n preds=Dense(classNum,activation='softmax')(x) #final layer with softmax activation\n model_MNv2=Model(inputs=base_model.input,outputs=preds)\n model_MNv2.load_weights(weightPath)\n print('done')\n end_time_load = time.time()\n print('loading time of MobileNet_v2 model: ' + str(end_time_load-start_time_load) + 's')\n return model_MNv2\n\nmodel=loadMNv2(weightPath,alpha_MNv2)\nmodel.save('mnv2.h5')\n\nmodel2=tf.keras.models.load_model('mnv2.h5', compile=False)\n\nconverter = tf.lite.TFLiteConverter.from_keras_model(model2)\ntflite_model = converter.convert()\nopen(\"converted_model.tflite\", \"wb\").write(tflite_model)\n\ninterpreter = tf.lite.Interpreter(model_content=tflite_model)\ninterpreter.allocate_tensors()\n\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\n# input_shape = input_details[0]['shape']\n# input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)\n\ninput_data=load_image(cv2.imread(test_image))\n\ntf_results = model.predict(input_data)\nprint(tf_results)\n\ntf_results = model2(tf.constant(input_data))\nprint(tf_results)\n\ninterpreter.set_tensor(input_details[0]['index'], input_data)\n\ninterpreter.invoke()\n\ntflite_results = interpreter.get_tensor(output_details[0]['index'])\nprint(tflite_results)\n\n# for tf_result, tflite_result in zip(tf_results, tflite_results):\n# np.testing.assert_almost_equal(tf_result, tflite_result, decimal=5)","repo_name":"IoTDATALab/ACE-Evaluation","sub_path":"app/mnv2/keras2tflite.py","file_name":"keras2tflite.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22897524276","text":"name=input(str(\"Please enter your name:\"))\r\n#str limits user input to be of type string\r\n#input is 
keyword that prompts the user to enter a value from the keyboard\r\nage=int(input(\"Please enter your age:\"))\r\n#int limits user inpout to be of type integer\r\ndef SayYear(age):\r\n returnYear=100-age+2022\r\n return returnYear\r\nprint(\"Hello\", name, \", You will turn 100 years in the year\", SayYear(40))\r\n\r\n#pip command is used to install packages in python programming","repo_name":"OCHENGO/VINCENT","sub_path":"age.py","file_name":"age.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41700548954","text":"import pytest\nimport tempfile\nimport h5py\nimport json\nimport pathlib\nimport copy\nimport numpy as np\nimport os\nimport PIL.Image\nfrom itertools import product\n\nfrom ophys_etl.utils.array_utils import normalize_array\n\nfrom ophys_etl.utils.rois import (\n sanitize_extract_roi_list,\n extract_roi_to_ophys_roi)\n\nfrom ophys_etl.modules.roi_cell_classifier.compute_labeler_artifacts import (\n LabelerArtifactGenerator)\n\nfrom ophys_etl.modules.roi_cell_classifier.utils import (\n get_traces)\n\nfrom ophys_etl.modules.segmentation.graph_utils.conversion import (\n graph_to_img)\n\n\n@pytest.mark.parametrize(\n \"video_lower_quantile, video_upper_quantile,\"\n \"projection_lower_quantile, projection_upper_quantile, use_graph, \"\n \"with_motion_border\",\n product((0.1, 0.2), (0.7, 0.8), (0.1, 0.2), (0.7, 0.8),\n (True, False), (True, False)))\ndef test_labeler_artifact_generator(\n tmp_path_factory,\n classifier2021_video_fixture,\n classifier2021_video_hash_fixture,\n suite2p_roi_fixture,\n suite2p_roi_hash_fixture,\n classifier2021_corr_graph_fixture,\n classifier2021_corr_graph_hash_fixture,\n classifier2021_corr_png_fixture,\n classifier2021_corr_png_hash_fixture,\n video_lower_quantile,\n video_upper_quantile,\n projection_lower_quantile,\n projection_upper_quantile,\n use_graph,\n with_motion_border):\n \"\"\"\n Test that LabelerArtifactGenerator runs and produces expected output\n \"\"\"\n\n tmpdir = tmp_path_factory.mktemp('full_artifact_generation')\n if with_motion_border:\n motion_path = pathlib.Path(tempfile.mkstemp(dir=tmpdir,\n suffix='.csv')[1])\n with open(motion_path, 'w') as out_file:\n out_file.write('x,y\\n')\n out_file.write('5,6\\n')\n out_file.write('14,-3\\n')\n expected_motion_border = {'bottom': 6.0,\n 'top': 3.0,\n 'right_side': 14.0,\n 'left_side': 0.0}\n\n motion_path = str(motion_path.resolve().absolute())\n\n else:\n motion_path = None\n expected_motion_border = {'top': 0,\n 'bottom': 0,\n 'left_side': 0,\n 'right_side': 0}\n\n if use_graph:\n corr_fixture = classifier2021_corr_graph_fixture\n corr_hash = classifier2021_corr_graph_hash_fixture\n else:\n corr_fixture = classifier2021_corr_png_fixture\n corr_hash = classifier2021_corr_png_hash_fixture\n\n output_tuple = tempfile.mkstemp(dir=tmpdir,\n prefix='artifact_file_',\n suffix='.h5')\n\n # without this, got a \"too many files open\" error\n os.close(output_tuple[0])\n\n output_path = pathlib.Path(output_tuple[1])\n\n # because tempfile.mkstemp actually creates the file\n output_path.unlink()\n\n input_data = dict()\n input_data['video_path'] = str(classifier2021_video_fixture)\n input_data['roi_path'] = str(suite2p_roi_fixture)\n input_data['correlation_path'] = str(corr_fixture)\n input_data['artifact_path'] = str(output_path)\n input_data['clobber'] = False\n input_data['video_lower_quantile'] = video_lower_quantile\n input_data['video_upper_quantile'] = video_upper_quantile\n 
input_data['projection_lower_quantile'] = projection_lower_quantile\n input_data['projection_upper_quantile'] = projection_upper_quantile\n input_data['motion_border_path'] = motion_path\n\n generator = LabelerArtifactGenerator(input_data=input_data, args=[])\n generator.run()\n\n assert output_path.is_file()\n\n with h5py.File(output_path, 'r') as artifact_file:\n\n motion_border = json.loads(\n artifact_file['motion_border'][()].decode('utf-8'))\n assert motion_border == expected_motion_border\n # test that ROIs were written correctly\n with open(suite2p_roi_fixture, 'rb') as in_file:\n expected_rois = json.load(in_file)\n expected_rois = sanitize_extract_roi_list(expected_rois)\n\n artifact_rois = json.loads(\n artifact_file['rois'][()].decode('utf-8'))\n\n assert expected_rois == artifact_rois\n\n # test that all ROIs appear in color map\n color_map = json.loads(\n artifact_file['roi_color_map'][()].decode('utf-8'))\n assert len(color_map) == len(expected_rois)\n for roi in expected_rois:\n assert str(roi['id']) in color_map\n\n # test that traces were written correctly\n ophys_rois = [extract_roi_to_ophys_roi(roi)\n for roi in expected_rois]\n expected_traces = get_traces(classifier2021_video_fixture,\n ophys_rois)\n\n for roi_id in expected_traces:\n np.testing.assert_array_equal(\n expected_traces[roi_id],\n artifact_file[f'traces/{roi_id}'][()])\n\n # test that the scaled video data was written correctly\n assert artifact_file['video_data'].chunks is not None\n scaled_video = artifact_file['video_data'][()]\n\n with h5py.File(classifier2021_video_fixture, 'r') as raw_file:\n raw_video = raw_file['data'][()]\n raw_max = np.max(raw_video, axis=0)\n raw_avg = np.mean(raw_video, axis=0)\n\n mn, mx = np.quantile(raw_video, (video_lower_quantile,\n video_upper_quantile))\n\n raw_video = np.where(raw_video > mn, raw_video, mn)\n raw_video = np.where(raw_video < mx, raw_video, mx)\n delta = mx-mn\n raw_video = raw_video-mn\n raw_video = raw_video.astype(float)\n raw_video = np.round(255.0*raw_video/delta).astype(np.uint8)\n np.testing.assert_array_equal(raw_video, scaled_video)\n del raw_video\n del scaled_video\n\n # test that max and avg projection images wer written correctly\n for raw_img, img_key in zip((raw_max, raw_avg),\n ('max_projection', 'avg_projection')):\n artifact_img = artifact_file[img_key][()]\n mn, mx = np.quantile(raw_img,\n (projection_lower_quantile,\n projection_upper_quantile))\n raw_img = np.where(raw_img > mn, raw_img, mn)\n raw_img = np.where(raw_img < mx, raw_img, mx)\n raw_img = raw_img.astype(float)\n np.testing.assert_array_equal(raw_img, artifact_img)\n\n artifact_corr = artifact_file['correlation_projection'][()]\n if use_graph:\n expected_corr = normalize_array(\n graph_to_img(\n corr_fixture,\n attribute_name='filtered_hnc_Gaussian'))\n else:\n expected_corr = normalize_array(\n np.array(PIL.Image.open(corr_fixture, 'r')))\n\n np.testing.assert_array_equal(artifact_corr, expected_corr)\n\n metadata = json.loads(artifact_file['metadata'][()].decode('utf-8'))\n\n # test that metadata has the right contents\n assert metadata['video']['path'] == str(classifier2021_video_fixture)\n assert metadata['video']['hash'] == classifier2021_video_hash_fixture\n\n assert metadata['rois']['path'] == str(suite2p_roi_fixture)\n assert metadata['rois']['hash'] == suite2p_roi_hash_fixture\n\n assert metadata['correlation']['path'] == str(corr_fixture)\n assert metadata['correlation']['hash'] == corr_hash\n\n assert metadata['generator_args'] == input_data\n if 
with_motion_border:\n assert 'motion_csv' in metadata\n else:\n assert 'motion_csv' not in metadata\n\n tmpdir = pathlib.Path(tmpdir)\n path_list = [n for n in tmpdir.rglob('*')]\n for this_path in path_list:\n if this_path.is_file():\n try:\n this_path.unlink()\n except Exception:\n pass\n\n\ndef test_clobber_error(\n classifier2021_video_fixture,\n suite2p_roi_fixture,\n classifier2021_corr_graph_fixture,\n tmpdir):\n \"\"\"\n Test that the artifact generator will not let you over write an\n existing file unless you specify clobber=True\n \"\"\"\n\n output_path = tempfile.mkstemp(dir=tmpdir,\n prefix='artifact_file_',\n suffix='.h5')[1]\n\n output_path = pathlib.Path(output_path)\n assert output_path.exists()\n\n input_data = dict()\n input_data['video_path'] = str(classifier2021_video_fixture)\n input_data['roi_path'] = str(suite2p_roi_fixture)\n input_data['correlation_path'] = str(classifier2021_corr_graph_fixture)\n input_data['artifact_path'] = str(output_path)\n input_data['clobber'] = False\n\n with pytest.raises(RuntimeError, match='--clobber=True'):\n LabelerArtifactGenerator(input_data=input_data, args=[])\n\n input_data['clobber'] = True\n LabelerArtifactGenerator(input_data=input_data, args=[])\n\n\n@pytest.fixture(scope='session')\ndef well_made_config_fixture(\n classifier2021_video_fixture,\n suite2p_roi_fixture,\n tmp_path_factory):\n \"\"\"\n A dict representing the input_json for LabelerArtifactGenerator.\n This one will pass validation.\n \"\"\"\n\n tmpdir = tmp_path_factory.mktemp('for_config')\n corr_path = tempfile.mkstemp(dir=tmpdir, suffix='.pkl')[1]\n output_path = tempfile.mkstemp(dir=tmpdir, suffix='.h5')[1]\n\n input_data = dict()\n input_data['video_path'] = str(classifier2021_video_fixture)\n input_data['roi_path'] = str(suite2p_roi_fixture)\n input_data['correlation_path'] = str(corr_path)\n input_data['artifact_path'] = str(output_path)\n input_data['clobber'] = True\n\n yield input_data\n\n\n@pytest.mark.parametrize(\n 'bad_key',\n ['video_path', 'roi_path',\n 'correlation_path', 'artifact_path',\n None])\ndef test_sufix_validation(\n well_made_config_fixture,\n tmp_path_factory,\n bad_key):\n \"\"\"\n Test that if you specify a file with the wrong suffix as an input,\n you get an error\n \"\"\"\n\n tmpdir = tmp_path_factory.mktemp('to_test_config')\n bad_file = tempfile.mkstemp(dir=tmpdir, suffix='.txt')[1]\n\n input_data = copy.deepcopy(well_made_config_fixture)\n if bad_key is None:\n LabelerArtifactGenerator(input_data=input_data, args=[])\n else:\n input_data.pop(bad_key)\n input_data[bad_key] = bad_file\n with pytest.raises(ValueError, match='must have suffix'):\n LabelerArtifactGenerator(input_data=input_data, args=[])\n","repo_name":"AllenInstitute/ophys_etl_pipelines","sub_path":"tests/modules/roi_cell_classifier/test_labeler_artifact_module.py","file_name":"test_labeler_artifact_module.py","file_ext":"py","file_size_in_byte":10792,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"11300912849","text":"from django import template\nfrom django.conf import settings\n\nfrom wagtail.images.models import Image\n\nfrom home.models import Organization\n\nregister = template.Library()\n\n@register.inclusion_tag('tags/map.html', takes_context=True)\ndef show_map(context):\n business = Organization.objects.first()\n pos = business.geo_coordinates.split(',')\n\n return {\n 'address' : business.address,\n 'lat' : pos[0],\n 'long' : pos[1],\n 'google_map_api_key' : settings.GOOGLE_MAP_API_KEY,\n 
'request': context['request'],\n }\n","repo_name":"marcanuy/keraban","sub_path":"home/templatetags/location_tags.py","file_name":"location_tags.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"30051175695","text":"# O(n^2) time | O(1) space\ndef twoNumberSum_v1(array, targetSum):\n for i in range(len(array) - 1):\n firstNum = array[i]\n for j in range(i + 1, len(array)):\n secondNum = array[j]\n if firstNum + secondNum == targetSum:\n return [firstNum, secondNum]\n return []\n\n\n# O(n) time | O(n) space\ndef twoNumberSum_v2(array, targetSum):\n nums = {} # create a hash-table\n\n for num in array:\n potentialMatch = targetSum - num\n if potentialMatch in nums:\n return [potentialMatch, num]\n else:\n nums[num] = True\n return []\n\n\n# O(nlog(n)) time | O(1) space\n# So, if you value space more than time, you would\n# probably choose this solution. Otherwise, you would choose the option with\n# a hash-table\ndef twoNumberSum_v3(array, targetSum):\n array.sort()\n left = 0\n right = len(array) - 1\n\n while left != right:\n currentSum = array[left] + array[right]\n if currentSum == targetSum:\n return [array[left], array[right]]\n elif currentSum < targetSum:\n left += 1\n elif currentSum > targetSum:\n right -= 1\n return []\n\n\ndef twoNumberSum_v4(array, targetSum):\n unique_set = set(array)\n\n for num in array:\n target = targetSum - num\n if target in unique_set and target is not num:\n return [num, target]\n \n return []\n ","repo_name":"azhalkouski/algorithms-and-problem-solving","sub_path":"two-number-sum/two-number-sum.py","file_name":"two-number-sum.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18134707035","text":"import csv\nfrom getImages import getAllBookingImages\nfrom googleSearch import bookingSearch\n\n\nfilename = 'temp.csv'\n\ndef readFile():\n print('Inside readFile')\n try:\n file = open(filename, 'r')\n reader = csv.reader(file)\n data = list(reader)\n\n newList = []\n for i in data:\n tuple = (i[0], i[1])\n newList.append(tuple)\n\n newList = newList[1 : ]\n return newList\n except:\n print('Exception in readFile')\n return None\n\n\ndef start():\n print('Inside start')\n try:\n data = readFile()\n\n for i in data:\n info = {'name' : i[0], 'location' : i[1]}\n searchString = 'booking.com' + ' ' + info['name'] + ' ' + info['location']\n print('searching: %%% ', searchString)\n queryDictionary = {'search' : searchString}\n\n url = None\n url = bookingSearch(queryDictionary)\n\n if url:\n getAllBookingImages(url, i[0], i[1])\n else:\n print('Booking.com could not be found for: ', i[0] + ', ' + i[1])\n except:\n print('Exception in start')\n\n\nif __name__ == '__main__':\n print('Starting the srcipt')\n start()\n print('Finished')\n","repo_name":"SarthakS93/LuxuryHotelsScrapper","sub_path":"imageScrapper.py","file_name":"imageScrapper.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13233451713","text":"import logging\nimport time\nimport csv\nimport json\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\nfrom datetime import datetime\nfrom dateutil import parser\n\n\n# enable logging\nlogging.basicConfig(level=logging.INFO, \n format='%(asctime)s %(levelname)s %(module)s - 
%(funcName)s: %(message)s', \n datefmt=\"%Y-%m-%d %H:%M:%S\")\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\n# authorize the app to access Twitter on our behalf\nconsumer_key = \"gJCred2AjZNBNbXK7ggUXFExY\"\nconsumer_secret = \"2PvP06rmjcqIyKnpMC3NwI750wAj0JpRtlUusfvcWQT8exv3CF\"\naccess_token = '121360453-vR5X4kqGiXPzK3K7jlrSXxrBMxD3p2VoefLErdxg'\naccess_secret = 'hHpaAyUeniQQetYW2x9OxURn9HubBM07xza1BvuMdMG1Y'\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\napi = tweepy.API(auth)\n\n\n# establish open connection to streaming API\nclass MyListener(StreamListener):\n\n def on_data(self, data):\n try:\n tweet = parse_tweet(data)\n content = extract_content(tweet)\n with open('tweets.csv', 'a') as f: \n writer = csv.writer(f, quotechar = '\"')\n writer.writerow(content)\n #logger.info(content[3])\n\n except BaseException as e:\n logger.warning(e)\n\n return True\n\n def on_error(self, status):\n logger.warning(status)\n return True\n\n\n# parse data\ndef parse_tweet(data):\n\n # load JSON item into a dict\n tweet = json.loads(data)\n\n\n # check if tweet is valid\n if 'user' in tweet.keys():\n\n # parse date \n tweet['CREATED_AT'] = parser.parse(tweet['created_at'])\n\n # classify tweet type based on metadata\n if 'retweeted_status' in tweet:\n tweet['TWEET_TYPE'] = 'retweet'\n\n elif len(tweet['entities']['user_mentions']) > 0:\n tweet['TWEET_TYPE'] = 'mention'\n\n else:\n tweet['TWEET_TYPE'] = 'tweet'\n\n return tweet\n\n else:\n logger.warning(\"Incomplete tweet: %s\", tweet)\n\n\n# extract relevant data to write to CSV\ndef extract_content(tweet):\n content = [tweet['user']['screen_name'],\n\t tweet['user']['id'],\n tweet['CREATED_AT'].strftime('%Y-%m-%d %H:%M:%S'),\n tweet['TWEET_TYPE'],\n tweet['text'].encode('unicode_escape')]\n return content \n\n\ndef start_stream():\n\n while True:\n\n logger.warning(\"Twitter API Connection opened\")\n\n try:\n twitter_stream = Stream(auth, MyListener())\n twitter_stream.filter(track=['siliconvalley','siliconvalleyHBO','siliconHBO'])\n\n except Exception as e: \n logger.warning(e)\n continue\n\n finally:\n logger.warning(\"Twitter API Connection closed\")\n\n\nif __name__ == '__main__':\n start_stream()\n","repo_name":"tushargl016/finalproject","sub_path":"scripts/stream1.py","file_name":"stream1.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43800779825","text":"# This problem was asked by Microsoft.\n# Let's represent an integer in a linked list format by having each node represent a digit in the number. 
The nodes make up the number in reversed order.\n# For example, the following linked list:\n# 1 -> 2 -> 3 -> 4 -> 5\n# is the number 54321.\n# Given two linked lists in this format, return their sum in the same linked list format.\n# For example, given\n# 9 -> 9\n# 5 -> 2\n# return 124 (99 + 25) as:\n# 4 -> 2 -> 1\n####\n# Makeshift linked list class\nclass Node:\n def __init__(self, val = None, next = None):\n self.next = next\n self.val = val\n\n# This is used only for generating the input linked lists.\n# Using this for the overall solution is the naive way to solve the problem.\ndef get_linked_list(arr):\n ret_node = None\n for i in arr[::-1]:\n ret_node = Node(i, ret_node)\n return ret_node\n\ndef get_arr(ll):\n if not ll:\n return []\n return [ll.val] + get_arr(ll.next)\n\ninput_1 = get_linked_list([9, 9, 8])\ninput_2 = get_linked_list([5, 2])\n####\n# Recursive function call to find the sum and persist carryover to successive calls.\ndef linked_list_sum(list1, list2, carry = 0):\n # No more content to recurse on\n if not list1 and not list2 and not carry:\n return None\n\n val = 0\n next1 = None\n next2 = None\n\n if list1:\n val += list1.val\n next1 = list1.next\n\n if list2:\n val += list2.val\n next2 = list2.next\n\n val += carry\n val, carry = val % 10, val//10\n return Node(val, linked_list_sum(next1, next2, carry))\n\n####\noutput1 = linked_list_sum(input_1, input_2)\nprint(get_arr(output1))\n","repo_name":"whoophee/DCP","sub_path":"127.py","file_name":"127.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"29710228953","text":"import logging\n\nfrom bs4 import BeautifulSoup\n\nfrom shop.api_clients import BaseClient\n\nlogger = logging.getLogger(__name__)\n\n\nclass Parser(BaseClient):\n # base_url = 'https://www.olx.ua/d/uk/transport/legkovye-avtomobili/'\n base_url= 'https://www.olx.ua/d/uk/hobbi-otdyh-i-sport/muzykalnye-instrumenty/'\n\n def parse(self) -> list:\n response = self.get_request(\n method='get',\n )\n soup = BeautifulSoup(response, \"html.parser\")\n try:\n category_name = soup.find('div', attrs={'data-cy': 'category-dropdown'}).find('div').text\n except (AssertionError, IndexError) as err:\n logger.error(err)\n else:\n products_list = []\n for element in soup.find_all('div', attrs={'data-cy': 'l-card'}):\n try:\n name = element.find('h6').text\n price = element.find('p', attrs={'data-testid': \"ad-price\"}).text.split('.')[0]\n image_url = element.find('div', attrs={'type': \"list\"}).find('div').find('img').get(\"src\")\n products_list.append(\n {\n 'name': name,\n 'description': name,\n 'price': price,\n 'category': category_name,\n 'image': image_url,\n }\n )\n except (AssertionError, KeyError) as err:\n logger.error(err)\n return products_list\n\n\n\nproducts_parser = Parser()\n\n","repo_name":"maksimm56m67/Hillel-django-shop","sub_path":"items/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12724388127","text":"import sys\n\ninput = sys.stdin.readline\n\nv, e = map(int, input().split(\" \"))\nedgeList = []\nfor _ in range(e):\n a,b,c = map(int,input().split(\" \"))\n edgeList.append([a,b,c])\nedgeList.sort(key=lambda x: x[2])\nparent = [i for i in range(v+1)]\n\ndef find(target):\n if parent[target] == target:\n return target\n parent[target] = find(parent[target])\n return parent[target]\n\ndef union(a, 
b):\n aRoot = find(a)\n bRoot = find(b)\n if aRoot < bRoot:\n parent[bRoot] = aRoot\n else:\n parent[aRoot] = bRoot\n\nans = 0\nfor a,b,c in edgeList:\n if find(a) != find(b):\n union(a, b)\n ans += c\n\nprint(ans)\n\n","repo_name":"leesunmin1231/Coding_test","sub_path":"스터디/최소스패닝트리.py","file_name":"최소스패닝트리.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"282086426","text":"import json\nfrom flask import Flask, escape, request, render_template, Response\nfrom service.elasticsearch import find_product\nfrom service.tag_extraction import get_entity_by_tag, load_model\n\nmodels = {}\nmodels[\"target_model\"] = load_model(\"./service/model/final-model-target.pt\")\nmodels[\"non_target_model\"] = load_model(\"./service/model/final-model-non-target.pt\")\n\napp = Flask(__name__)\n\n@app.route('/', methods = ['POST','GET'])\ndef hello():\n return render_template('index.html')\n\n@app.route('/ajaxhandler', methods = ['POST','GET'])\ndef process_products():\n xml = ''\n keyword = request.form.get('keyword')\n product_list = find_product(keyword)\n return json.dumps(product_list)\n\n\n@app.route('/model', methods = ['POST','GET'])\ndef home(): \n return render_template('index_.html')\n\n@app.route('/ajaxhandlermodel', methods = ['POST','GET'])\ndef process_products_model():\n xml = ''\n keyword = request.form.get('keyword')\n tagged_sentences, tag_dict = get_entity_by_tag(query=keyword, models=models)\n #print(tag_dict)\n str1 = keyword.replace(' '.join(tag_dict['TARGET']),\"(\" +' '.join(tag_dict['TARGET'])+\")^2\")\n product_list = find_product(str1)\n print(str1)\n return json.dumps({\"tagged_sentences\": tagged_sentences, \"product_list\": product_list})\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host=\"0.0.0.0\",port=63500)","repo_name":"hoangdv-uet/IntentSearchAnalyzer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12272384656","text":"import tkinter\nimport time\nimport threading\nimport tkinter.messagebox\nfrom tkinter.ttk import Progressbar\n\nwindow = tkinter.Tk()\nwindow.title(\"Python GUI The Most Dangerous Writing App\")\nwindow.config(width=640, height=480, padx=50, pady=50)\n\nframeInput = tkinter.Frame(borderwidth=20)\ninputText = tkinter.Text(frameInput, width=30, height=5, font=(\"Courier\", 14))\nProgress_Bar = Progressbar(window, orient= tkinter.HORIZONTAL, length=250, mode='determinate')\n\nframeInput.pack()\ninputText.pack()\nProgress_Bar.pack()\n\nstart_time = time.perf_counter()\n\ndef startCounter():\n global start_time\n start_time = time.perf_counter()\n\ndef key_press(event):\n try :\n startCounter()\n except Exception as ex:\n print(ex)\n\ndef wipeTextThread():\n while True:\n delta_time = time.perf_counter() - start_time\n if delta_time > 5:\n print(\"More that 5 second Idle, wipe the entry\")\n inputText.delete('1.0', tkinter.END)\n inputText.focus()\n Progress_Bar['value'] = 0\n else:\n Progress_Bar['value'] = 100 - int(delta_time) * 20\n print(f\"{int(delta_time)} second idle\")\n time.sleep(0.5)\n\ninputText.bind('', key_press)\nthread_init = threading.Thread(target=wipeTextThread)\nthread_init.start()\n\nif __name__ == '__main__':\n inputText.focus()\n 
window.mainloop()","repo_name":"distareza/PythonGUI_DisapperingTextWritingApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10162846478","text":"from odoo import api, models\n\n\nclass AccountMove(models.Model):\n _inherit = \"account.move\"\n\n def _recompute_payment_terms_lines(self):\n _self = self\n if self.partner_id:\n _self = self.with_context(move_partner_id=self.partner_id.id)\n super(AccountMove, _self)._recompute_payment_terms_lines()\n\n @api.onchange(\"invoice_date_due\")\n def _onchange_invoice_date_due_account_payment_term_partner_holiday(self):\n \"\"\"Recompute the due date to the next available date according to\n the holiday periods set in the partner.\n\n It must only be re-calculated when a payment term is not set.\n This prevents the due date to be changed again and that another\n given number of days are added according to what is set on the\n payment term.\n \"\"\"\n if (\n self.invoice_date_due\n and self.partner_id\n and not self.invoice_payment_term_id\n ):\n new_invoice_date_due = self.partner_id._get_valid_due_date(\n self.invoice_date_due\n )\n if new_invoice_date_due != self.invoice_date_due:\n self.invoice_date_due = new_invoice_date_due\n\n def action_post(self):\n \"\"\"Inject a context for getting the partner when computing payment term.\n The trade-off is that we should split the call to super record per record,\n but it shouldn't impact in performance.\n \"\"\"\n for item in self:\n _item = item.with_context(move_partner_id=item.partner_id.id)\n super(AccountMove, _item).action_post()\n return True\n","repo_name":"rtmelektronik/odootest","sub_path":"account_payment_term_partner_holiday/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22609366096","text":"from libs.local import LocalCalc\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom math import log10\n\n\ndef get_main_pie(calc):\n # get the data\n date, lth, sth = calc.get_last_history()\n\n fig = make_subplots(rows=2, cols=1, specs=[[{'type': 'domain'}], [{'type': 'domain'}]], subplot_titles=('LTH vs STH', 'Log Percentage'))\n pie = go.Pie(values=[lth, sth], labels=['Long-Term Holding', 'Short-Term Holding'])\n log_pie = go.Pie(values=[log10(lth), log10(sth)], labels=['Long-Term Holding', 'Short-Term Holding'])\n fig.add_trace(pie, row=1, col=1)\n fig.add_trace(log_pie, row=2, col=1)\n fig.update_layout(title_text=f'{date:%b %d, %Y}', title_x=0.5)\n\n return fig\n\n\ndef get_top_address_pie(connection, top_count):\n cursor = connection.cursor()\n\n # get total balance\n (row, ) = cursor.execute('SELECT SUM(balance) FROM balance').fetchall()\n total_balance = row[0] * 1e-6\n\n # get top addresses\n rows = cursor.execute('SELECT * FROM balance ORDER BY balance DESC LIMIT {}'.format(top_count)).fetchall()\n\n # make arrays\n balances = []\n addresses = []\n for rows in rows:\n balance = rows[2] * 1e-6\n balances.append(balance)\n addresses.append(rows[1])\n\n # subtract top balances\n total_balance = total_balance - balance\n\n # add Others item\n balances.append(total_balance)\n addresses.append('Others')\n\n return go.Pie(values=balances, labels=addresses)\n\n\ndef get_history_line(calc):\n data = calc.get_history()\n\n # stack lines\n fig = go.Figure()\n 
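    # fill='tozeroy' shades the LTH trace down to zero; fill='tonexty' shades only the band between the two traces, so the visible band is the STH share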
fig.add_trace(go.Scatter(\n x=[row[0] for row in data],\n y=[row[1] for row in data],\n fill='tozeroy',\n name='LTH'))\n fig.add_trace(go.Scatter(\n x=[row[0] for row in data],\n y=[row[1] + row[2] for row in data],\n fill='tonexty',\n name='LTH + STH'))\n\n fig.update_layout(title={\n 'text': f'LTH vs STH over time',\n 'y': 1.0,\n 'x': 0.5,\n 'xanchor': 'center',\n 'yanchor': 'top'\n })\n\n return fig\n\n\ndef get_log_history_line(calc):\n data = calc.get_history()\n\n # stack lines\n fig = go.Figure()\n fig.add_trace(go.Scatter(\n x=[row[0] for row in data],\n y=[log10(row[1]) for row in data],\n fill='tozeroy',\n name='LTH'))\n fig.add_trace(go.Scatter(\n x=[row[0] for row in data],\n y=[log10(row[1] + row[2]) for row in data],\n fill='tonexty',\n name='LTH + STH'))\n\n fig.update_layout(title={\n 'text': f'Logarithmic LTH vs STH over time',\n 'y': 1.0,\n 'x': 0.5,\n 'xanchor': 'center',\n 'yanchor': 'top'\n })\n\n return fig\n\n\ndef main():\n calculator = LocalCalc()\n\n get_main_pie(calculator).show()\n get_history_line(calculator).show()\n get_log_history_line(calculator).show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"armansw/Anyblock-Ethereum-Analysis","sub_path":"do_anal.py","file_name":"do_anal.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15773312134","text":"import json\nimport requests\nimport random\nfrom characters import PhoenixWright\nfrom characters import MilesEdgeworth\nfrom characters import WinstonPayne\nfrom characters import ManfredVonKarma\nfrom characters import FranziskaVonKarma\nfrom characters import MiaFey\nfrom characters import Godot\nfrom characters import ApolloJustice\nfrom characters import KristophGavin\nfrom characters import KlavierGavin\nfrom characters import JacquesPortsman\nfrom characters import ShiLongLang\nfrom characters import CalistoYew\nfrom characters import QuercusAlba\nfrom characters import HershelLayton\nfrom characters import AthenaCykes\nfrom characters import ZachariasBarnham\nfrom characters import Darklaw\nfrom characters import GaspenPayne\nfrom characters import SimonBlackquill\n\nclass LongPollApi:\n def __init__(self, access_token, group_id, my_token):\n self.access_token = access_token\n self.group_id = group_id\n self.server = None\n self.key = None\n self.ts = None\n self.my_token=my_token\n self.set()\n self.pw=PhoenixWright(group_id,my_token)\n self.me=MilesEdgeworth(group_id,my_token)\n self.wp=WinstonPayne(group_id,my_token)\n self.mk=ManfredVonKarma(group_id,my_token)\n self.fk=FranziskaVonKarma(group_id,my_token)\n self.mf=MiaFey(group_id,my_token)\n self.da=Godot(group_id,my_token)\n self.aj=ApolloJustice(group_id,my_token)\n self.kr=KristophGavin(group_id,my_token)\n self.kl=KlavierGavin(group_id,my_token)\n self.jp=JacquesPortsman(group_id,my_token)\n self.sl=ShiLongLang(group_id,my_token)\n self.cy=CalistoYew(group_id,my_token)\n self.qa=QuercusAlba(group_id,my_token)\n self.hl=HershelLayton(group_id,my_token)\n self.ac=AthenaCykes(group_id,my_token)\n self.zb=ZachariasBarnham(group_id,my_token)\n self.dk=Darklaw(group_id,my_token)\n self.gp=GaspenPayne(group_id,my_token)\n self.sb=SimonBlackquill(group_id,my_token)\n self.count={'pw':3,'me':3,'wp':0,'mk':0,'fk':0,'mf':2,'da':0,'aj':3,'kr':0,'kl':0,'jp':0,'sl':0,'cy':0,'qa':0,'hl':1,'ac':3,'zb':0,'dk':0,'gp':0,'sb':0}\n 
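        # self.count caps the saying index accepted per character code; self.func (below) maps each code to that character's sayings list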
self.func={'pw':self.pw.sayings,'me':self.me.sayings,'wp':self.wp.sayings,'mk':self.mk.sayings,'fk':self.fk.sayings,'mf':self.mf.sayings,'da':self.da.sayings,'aj':self.aj.sayings,'kr':self.kr.sayings,'kl':self.kl.sayings,'jp':self.jp.sayings,'sl':self.sl.sayings,'cy':self.cy.sayings,'qa':self.qa.sayings,'hl':self.hl.sayings,'ac':self.ac.sayings,'zb':self.zb.sayings,'dk':self.dk.sayings,'gp':self.gp.sayings,'sb':self.sb.sayings}\n print(\"init complete!\")\n def set(self):\n print(self.access_token)\n r = requests.get(\"https://api.vk.com/method/groups.getLongPollServer?group_id=\"+self.group_id+\"&access_token=\"+self.access_token+\"&v=5.103\").json()['response']\n print(r)\n self.server=r['server']\n self.key=r['key']\n self.ts=r['ts']\n def events(self):\n r = requests.get(self.server+\"?act=a_check&key=\"+self.key+\"&ts=\"+self.ts+\"&wait=5\").json()\n updates=r['updates']\n if updates:\n for i in updates:\n if i['type']=='message_new':\n obj=i['object']['message']\n print(obj)\n user_id=str(obj['from_id'])\n peer_id=str(obj['peer_id'])\n text=obj['text']\n if (text[0]=='/') :\n text=text[1:]\n if len(text)==4:\n name=text[0:2]\n num=int(text[3])\n if name in self.count:\n if num<=self.count[name]:\n random_id=str(random.randint(-9223372036854775808,9223372036854775807))\n response=requests.post(\"https://api.vk.com/method/messages.send?peer_id=\"+peer_id+\"&random_id=\"+random_id+\"&attachment=doc-\"+self.group_id+\"_\"+str(self.func[name][num])+\"&access_token=\"+self.access_token+\"&v=5.103\")\n\n print(response.json())\n self.ts=r['ts']\n","repo_name":"peacefulcrab/objection-bot","sub_path":"longpoll.py","file_name":"longpoll.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33617042556","text":"from langchain.smith.evaluation.config import RunEvalConfig, SingleKeyEvalConfig\nfrom langsmith.evaluation.evaluator import (\n EvaluationResult,\n run_evaluator,\n)\nfrom langsmith.schemas import Example, Run\n\nfrom langchain_benchmarks.extraction.tasks.chat_extraction.schema import GenerateTicket\n\n\n@run_evaluator\ndef json_schema(run: Run, example: Example) -> EvaluationResult:\n \"\"\"Evaluate the json schema of the generated ticket.\"\"\"\n score, comment = None, None\n try:\n GenerateTicket.parse_obj(run.outputs[\"output\"])\n score = 1\n except Exception as e:\n comment = repr(e)\n score = 0\n\n return EvaluationResult(\n key=\"json_schema\",\n score=score,\n comment=comment,\n )\n\n\n@run_evaluator\ndef evaluate_toxicity_similarity(run: Run, example: Example) -> EvaluationResult:\n \"\"\"Evaluate the toxicity of the generated ticket.\"\"\"\n gt = example.outputs[\"output\"][\"question\"][\"toxicity\"]\n score, comment = None, None\n # Toxicity should be a on scale from 0 to 5\n try:\n pred = run.outputs[\"output\"][\"question\"][\"toxicity\"]\n score = 1 - abs(gt - float(pred)) / 5\n except Exception as e:\n comment = repr(e)\n # Forgot to predict / mis-structured\n score = 0\n return EvaluationResult(\n key=\"toxicity_similarity\",\n score=score,\n comment=comment,\n )\n\n\n@run_evaluator\ndef evaluate_sentiment_similarity(run: Run, example: Example) -> EvaluationResult:\n \"\"\"Evaluate the sentiment of the generated ticket.\"\"\"\n gt = example.outputs[\"output\"][\"question\"][\"sentiment\"]\n ordinal_map = {\n \"negative\": 0,\n \"neutral\": 1,\n \"positive\": 2,\n }\n gt_score = ordinal_map.get(str(gt).lower())\n score, comment = None, None\n # Sentiment is an enum, 
\"Negative\", \"Neutral\", \"Positive\"\n try:\n pred = run.outputs[\"output\"][\"question\"][\"sentiment\"]\n pred_score = ordinal_map.get(str(pred).lower())\n score = 1 - (abs(gt_score - float(pred_score)) / 2)\n except Exception as e:\n comment = repr(e)\n # Forgot to predict / mis-structured\n score = 0\n return EvaluationResult(\n key=\"sentiment_similarity\",\n score=score,\n comment=comment,\n )\n\n\n@run_evaluator\ndef evaluate_confidence_level_similarity(\n run: Run, example: Example\n) -> EvaluationResult:\n \"\"\"Evaluate the confidence level of the generated ticket.\n This is a binary T/F question.\"\"\"\n gt = example.outputs[\"output\"][\"response\"][\"confidence_level\"]\n score, comment = None, None\n try:\n pred = run.outputs[\"output\"][\"response\"][\"confidence_level\"]\n score = 1 - (abs(gt - float(pred)) / 5)\n except Exception as e:\n comment = repr(e)\n score = 0\n return EvaluationResult(\n key=\"confidence_level_similarity\",\n score=score,\n comment=comment,\n )\n\n\n@run_evaluator\ndef evaluate_question_category_similarity(\n run: Run, example: Example\n) -> EvaluationResult:\n \"\"\"Evaluate the question category of the generated ticket.\n This is a binary T/F question.\"\"\"\n gt = example.outputs[\"output\"][\"question\"][\"question_category\"]\n\n score, comment = None, None\n try:\n pred = run.outputs[\"output\"][\"question\"][\"question_category\"]\n score = int(gt == pred)\n except Exception as e:\n comment = repr(e)\n # Forgot to predict / mis-structured\n score = 0\n return EvaluationResult(\n key=\"question_category\",\n score=score,\n comment=comment,\n )\n\n\n@run_evaluator\ndef evaluate_off_topic(run: Run, example: Example) -> EvaluationResult:\n \"\"\"Evaluate the off topic of the generated ticket.\n This is a binary T/F question.\"\"\"\n gt = example.outputs[\"output\"][\"question\"][\"is_off_topic\"]\n score, comment = None, None\n try:\n pred = run.outputs[\"output\"][\"question\"].get(\"is_off_topic\")\n score = int(gt == pred)\n except Exception as e:\n comment = repr(e)\n # Forgot to predict / mis-structured\n score = 0\n return EvaluationResult(\n key=\"off_topic_similarity\",\n score=score,\n comment=comment,\n )\n\n\n@run_evaluator\ndef evaluate_programming_language(run: Run, example: Example) -> EvaluationResult:\n \"\"\"Evaluate the programming language of the generated ticket.\n This is a binary T/F question.\"\"\"\n gt = example.outputs[\"output\"][\"question\"][\"programming_language\"]\n score, comment = None, None\n try:\n pred = run.outputs[\"output\"][\"question\"][\"programming_language\"]\n score = int(gt == pred)\n except Exception as e:\n comment = repr(e)\n # Forgot to predict / mis-structured\n score = 0\n return EvaluationResult(\n key=\"programming_language_similarity\",\n score=score,\n comment=comment,\n )\n\n\ndef get_eval_config() -> RunEvalConfig:\n \"\"\"Get the evaluation configuration for the chat extraction task.\"\"\"\n return RunEvalConfig(\n evaluators=[\n # General aggregate score\n SingleKeyEvalConfig(\n # input key is ignored.\n evaluator_type=\"json_edit_distance\",\n input_key=\"question\",\n )\n ],\n custom_evaluators=[\n json_schema,\n evaluate_toxicity_similarity,\n evaluate_sentiment_similarity,\n evaluate_confidence_level_similarity,\n evaluate_question_category_similarity,\n evaluate_off_topic,\n evaluate_programming_language,\n ],\n 
)\n","repo_name":"langchain-ai/langchain-benchmarks","sub_path":"langchain_benchmarks/extraction/tasks/chat_extraction/evaluators.py","file_name":"evaluators.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"81"} +{"seq_id":"18701656817","text":"n = int(input())\noly = []\nfor _ in range(n):\n # 참가국 학생번호 성적\n oly.append(list(map(int, input().split(' '))))\n\noly.sort(key=lambda x: x[2], reverse=True)\n\n# 상위 수상자 2명 차례대로 금메달 은메달\ngold = oly[0]\nsilver = oly[1]\n# 동메달 앞선 메달권 같은나라일 경우 X\nbronze = None\n\nfor i in range(2, len(oly)):\n if gold[0] == silver[0] == oly[i][0]:\n continue\n else:\n bronze = oly[i]\n break\n\nprint(gold[0],gold[1])\nprint(silver[0],silver[1])\nprint(bronze[0],bronze[1])","repo_name":"dev-dain/algorithm-study","sub_path":"source/suzy/week11/C2535.py","file_name":"C2535.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"28490055889","text":"def quicksort(arr):\n if len(arr) <= 1:\n return arr\n else:\n pivot = arr[0]\n left = []\n right = []\n for i in range(1, len(arr)):\n if arr[i] < pivot:\n left.append(arr[i])\n else:\n right.append(arr[i])\n return quicksort(left) + [pivot] + quicksort(right)\n\ndef main():\n test_cases = [\n ([], []),\n ([1], [1]),\n ([3, 2, 1], [1, 2, 3]),\n ([5, 4, 3, 2, 1], [1, 2, 3, 4, 5]),\n ([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]),\n ([5, 4, 3, 2, 1, 0], [0, 1, 2, 3, 4, 5]),\n ([1, 3, 2, 5, 4], [1, 2, 3, 4, 5]),\n ([1, 1, 1, 1, 1], [1, 1, 1, 1, 1]),\n ([1, 2, 3, 2, 1], [1, 1, 2, 2, 3]),\n ]\n for arr, expected in test_cases:\n result = quicksort(arr)\n assert result == expected, f\"quicksort({arr}) returned {result}, but expected {expected}\"\n\nif __name__ == \"__main__\":\n main()","repo_name":"saipragna25/chatgpt-interpreter-assignment","sub_path":"copilot_chat_code/test_quick_sort.py","file_name":"test_quick_sort.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36641156694","text":"from kivy.logger import Logger\nfrom random import sample\nfrom core import parse_word, number_2_word\n\n\nclass CardsPair():\n \"\"\"Podstawowa definicja pary kart\"\"\"\n\n def __init__(self, cid, fst, snd, dsc=None):\n \"\"\"\n @cid - identyfikator pary\n @fst - element znajdujacy sie na przedzie karty\n @snd - element znajdujacy sie na odwrocie\n @dsc - opcjonalny komunikat, ktory bedzie wyswietlany w dymku po dopasowaniu pary\n \"\"\"\n self.cid = cid\n self.fst = fst\n self.snd = snd\n self.dsc = parse_word(dsc)\n\n @staticmethod\n def get_random_pairs(count, index_file):\n \"\"\"\n Procedura zwraca określoną ilość losowo wybranych obeiktów typu CardsPair z kolekcji znajdującej się\n w pliku @index_file\n @count - oczekiwana liczba losowych kart\n @index_file - plik zawierajacy parametry kart\n \"\"\"\n\n try:\n with open(index_file, \"r\") as index:\n\n # Wczytanie i zainicjowanie listy wszystkich znanych par kart\n cards = [CardsPair(i, *line.split(';')) for i, line in enumerate(index.readlines())]\n\n # Wybranie losowo `count` elementow\n return sample(cards, count)\n\n # Jesli z jakis powodow nie udalo sie wygenreowac odpowiedniej ilosci par, to zwracamy - nic\n except Exception as e:\n Logger.debug(e.message)\n return []\n\n @staticmethod\n def get_random_numbers_as_pairs(count, nrange):\n \"\"\"\n Procedura zwraca określoną ilość losowo wybranych liczb z 
podanego zakresu i tworzy z nich obiekty CardsPair\n zawierające te właśnie liczby oraz ich reprezentację słowną\n @count - zadana liczba losowych kart\n @nrange - zakres losowania\n \"\"\"\n\n # Wylosowanie zadanej liczby przykladow\n samples = sample(nrange, count)\n\n # Zwrocenie ich w postaci CardsPair\n return [CardsPair(i, str(i), number_2_word(i), \"\") for i in samples]\n","repo_name":"phiotr/DysDroid","sub_path":"cardspair.py","file_name":"cardspair.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9537319083","text":"import math\nimport random\nimport torch\nimport numpy as np\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nTensor = torch.Tensor\n\n\nclass Parameters(object):\n def __init__(self):\n self.num_episodes = 2000\n self.solve_score = 13\n\n self.replay_capacity = 100000\n self.batch_size = 64\n\n self.update_target_frequency = 100\n\n self.learning_rate = 0.001\n self.gamma = 0.99\n\n self.e_greedy = 1.0\n self.e_greedy_min = 0.01\n self.e_greedy_decay = 200\n\n self.double = False\n self.dueling = False\n\n\nclass ExperienceReplay(object):\n def __init__(self, params):\n self.params = params\n\n self.memory = []\n self.position = 0\n\n def push(self, state, action, new_state, reward, done):\n transition = (state, action, new_state, reward, done)\n\n if self.position >= len(self.memory):\n self.memory.append(transition)\n else:\n self.memory[self.position] = transition\n\n self.position = (self.position + 1) % self.params.replay_capacity\n\n def sample(self):\n return zip(*random.sample(self.memory, self.params.batch_size))\n\n def __len__(self):\n return len(self.memory)\n\n\nclass NeuralNetwork(torch.nn.Module):\n def __init__(self, params, num_states, num_actions):\n super(NeuralNetwork, self).__init__()\n\n self.params = params\n self.num_states = num_states\n self.num_actions = num_actions\n\n self.fc1_size = 64\n self.fc2_size = 64\n\n self.fc1 = torch.nn.Linear(num_states, self.fc1_size)\n self.fc2 = torch.nn.Linear(self.fc1_size, self.fc2_size)\n self.fc3 = torch.nn.Linear(self.fc2_size, self.num_actions)\n\n self.activation = torch.nn.ReLU()\n\n def forward(self, state):\n x = self.activation(self.fc1(state))\n x = self.activation(self.fc2(x))\n\n return self.fc3(x)\n\n\nclass DuelingNeuralNetwork(torch.nn.Module):\n def __init__(self, params, num_states, num_actions):\n super(DuelingNeuralNetwork, self).__init__()\n\n self.num_states = num_states\n self.num_actions = num_actions\n\n self.fc1_size = 64\n self.fc2_size = 64\n\n self.fc1 = torch.nn.Linear(num_states, self.fc1_size)\n self.advantage = torch.nn.Linear(self.fc1_size, self.num_actions)\n self.value = torch.nn.Linear(self.fc1_size, 1)\n\n self.activation = torch.nn.ReLU()\n\n def forward(self, state):\n x = self.activation(self.fc1(state))\n\n advantage_output = self.advantage(x)\n value_output = self.value(x)\n\n final_output = value_output + advantage_output - advantage_output.mean()\n\n return final_output\n\n\nclass Agent(object):\n def __init__(self, params, num_states, num_actions, memory):\n self.params = params\n self.num_states = num_states\n self.num_actions = num_actions\n self.memory = memory\n\n NN = NeuralNetwork\n if self.params.dueling:\n NN = DuelingNeuralNetwork\n\n self.nn = NN(self.params, self.num_states, self.num_actions).to(device)\n self.target_nn = NN(self.params, self.num_states, self.num_actions).to(device)\n\n self.loss_function = 
torch.nn.MSELoss()\n self.optimizer = torch.optim.Adam(params=self.nn.parameters(), lr=self.params.learning_rate)\n\n self.total_steps = 0\n self.target_updated_count = 0\n\n def save_model(self, path):\n torch.save(self.nn.state_dict(), path)\n\n def load_model(self, path):\n self.nn.load_state_dict(torch.load(path))\n\n def get_epsilon(self, total_steps):\n epsilon = self.params.e_greedy_min + (self.params.e_greedy - self.params.e_greedy_min) * \\\n math.exp(-1. * total_steps / self.params.e_greedy_decay)\n return epsilon\n\n def get_action(self, state):\n self.total_steps += 1\n\n random_number = torch.rand(1)[0]\n epsilon = self.get_epsilon(self.total_steps)\n\n if random_number > epsilon:\n with torch.no_grad():\n state = Tensor(state).to(device)\n action = self.nn(state)\n action = torch.max(action, 0)[1].item()\n else:\n action = np.random.randint(self.num_actions)\n\n return action\n\n def optimize(self):\n if len(self.memory) < self.params.batch_size:\n return\n\n state, action, new_state, reward, done = self.memory.sample()\n\n state = Tensor(state).to(device)\n new_state = Tensor(new_state).to(device)\n\n reward = Tensor(reward).to(device)\n action = torch.LongTensor(action).to(device)\n done = Tensor(done).to(device)\n\n if self.params.double:\n new_state_indeces = self.nn(new_state).detach()\n max_new_state_indeces = torch.max(new_state_indeces, 1)[1]\n\n new_state_values = self.target_nn(new_state).detach()\n max_new_state_values = new_state_values.gather(1, max_new_state_indeces.unsqueeze(1)).squeeze(1)\n else:\n new_state_values = self.target_nn(new_state).detach()\n max_new_state_values = torch.max(new_state_values, 1)[0]\n\n target_value = reward + (1 - done) * self.params.gamma * max_new_state_values\n\n predicted_value = self.nn(state).gather(1, action.unsqueeze(1)).squeeze(1)\n\n loss = self.loss_function(predicted_value, target_value)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n if self.target_updated_count % self.params.update_target_frequency == 0:\n self.target_nn.load_state_dict(self.nn.state_dict())\n\n self.target_updated_count += 1","repo_name":"mexxik/drl-project-1","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16504010377","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\nmyRange = input().split()\nhappiness = input().split()\nmyVal = []\nmyVal1 = input().split()\nmyVal2 = input().split()\nmySet1 = set()\nmySet2 = set()\ncount = 0\n\nfor i in range(len(happiness)):\n myVal.append(int(happiness[i]))\n\nfor j in range(len(myVal1)):\n mySet1.add(int(myVal1[j]))\n mySet2.add(int(myVal2[j]))\n\nfor k in range(len(happiness)):\n if int(happiness[k]) in mySet1:\n count+=1\n elif int(happiness[k]) in mySet2:\n count-=1\n else:\n count += 0\n\nprint(count)\n","repo_name":"KunjPathak12/DiggibyteInternshipModule1","sub_path":"noIdeaProblem.py","file_name":"noIdeaProblem.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31777173848","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doctor', '0002_remove_bancoimg_titulo'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='bancoimg',\n name='titulo',\n field=models.CharField(default='', max_length=500, null=True, blank=True),\n ),\n ]\n","repo_name":"atomychouse/normatividadsite_2021","sub_path":"doctor/migrations/0003_bancoimg_titulo.py","file_name":"0003_bancoimg_titulo.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30302935487","text":"import pygame\nimport colores\nfrom helpers import *\nfrom constantes import *\nfrom disparos import Disparar\n\n\n\nclass NavePpal:\n def __init__(self,posx,posy) -> None:\n self.quieto = get_superficie_sprite(PATH_IMG+\"player_ship.png\",4,1)\n self.muere = get_superficie_sprite(PATH_IMG+\"explosion.png\",3,2)\n self.muere = escalar(self.muere)\n self.frame = 0\n self.animacion = self.quieto\n self.imagen = self.animacion[self.frame]\n self.animacion_repetir = True\n self.tiempo = 0\n self.tiempo_inmune = 0\n self.limite_disparos = 5\n self.velocidad = 5\n self.vidas = 3\n self.score = 0\n # self.superficie = pygame.transform.scale(pygame.image.load(path),(ancho,alto))\n self.mostar = True\n self.disparos = []\n self.velocidad_disparo = 5\n self.inmune = False\n\n self.rectangulo = self.imagen.get_rect()\n self.rectangulo.centerx = posx\n self.rectangulo.y = posy\n \n def shot(self):\n if(self.limite_disparos > len(self.disparos)):\n disparo = Disparar(self.rectangulo.centerx,self.rectangulo.y)\n self.disparos.append(disparo)\n\n def movimiento(self,tiempo):\n if self.tiempo > 100:\n self.tiempo = 0\n if(self.frame < len(self.animacion)-1):\n self.frame +=1\n else:\n if(self.animacion_repetir):\n self.frame = 0\n else:\n self.mostar = False\n else:\n self.tiempo+=tiempo\n\n def disparar(self):\n if(len(self.disparos) > 0):\n if(self.disparos[0].rectangulo.y < 0 or not(self.disparos[0].mostrar)):\n self.disparos.pop(0)\n\n def update(self,tiempo):\n\n self.disparar()\n\n if(self.mostar):\n self.movimiento(tiempo)\n\n if(self.inmune):\n self.tiempo_inmune+=tiempo\n if(self.tiempo_inmune > 1000):\n self.limite_disparos = 5\n self.inmune = False\n self.tiempo_inmune=0\n\n if(self.animacion == self.muere):\n self.tiempo += tiempo\n if(self.tiempo > 1000):\n self.vidas -=1\n if(self.vidas > 0):\n self.frame = 0\n self.tiempo = 0\n self.animacion = self.quieto\n self.mostar = True\n self.animacion_repetir = True\n self.inmune = True\n self.rectangulo.centerx=int(ANCHO_VENTANA/2)\n else:\n #GAME OVER\n pass\n # 
self.limite_disparos=0\n\n def draw(self,pantalla):\n if(DEBUG):\n pygame.draw.rect(pantalla,colores.COLOR_AMARILLO_ARENA, self.rectangulo)\n if(self.mostar):\n self.imagen = self.animacion[self.frame]\n pantalla.blit(self.imagen, self.rectangulo)\n\n def control(self, accion):\n ancho_nave = self.rectangulo.width\n if(accion == \"STAY\"):\n lista_teclas = pygame.key.get_pressed()\n if lista_teclas[pygame.K_RIGHT] and self.rectangulo.x < ANCHO_VENTANA - ancho_nave:\n self.rectangulo.x = self.rectangulo.x + self.velocidad\n if lista_teclas[pygame.K_LEFT] and self.rectangulo.x > 0:\n self.rectangulo.x = self.rectangulo.x - self.velocidad\n if(accion == \"SHOT\"):\n self.shot()\n if(accion == \"DEAD\"):\n self.animacion = self.muere\n self.limite_disparos = 0\n self.animacion_repetir = False\n \n","repo_name":"Janegro09/segundoParcialLabo","sub_path":"personaje.py","file_name":"personaje.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28562333498","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 23 16:19:59 2020\n\n@author: kjh1\n\"\"\"\n'''''\nif key in list : return index\nif key not in list : return insertion point (minus)\n'''''\ndef binary_search(lst,key):\n low = 0\n high = len(lst) - 1\n \n \n while high >= low:\n mid = (low + high)//2\n if key < lst[mid]:\n high = mid -1\n elif key == lst[mid]:\n return mid\n else:\n low = mid + 1\n \n return -low -1\n \nmat = [1,2,2,2,3,4]\n\na = binary_search(mat,2)\n\nprint(a)\nprint(mat)\n\n#mat.insert(-a-1,3)\n#mat.insert(a,2)\n#print(mat)","repo_name":"kjh000/Algorithm","sub_path":"algo/리스트에서 이분탐색.py","file_name":"리스트에서 이분탐색.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11827550111","text":"import discord \nimport asyncio\nimport re \nimport datetime\nfrom database import Database\nfrom extractor import Extractor\nfrom message import Message\nfrom config import *\n\nclass Main:\n def __init__(self):\n self.client = discord.Client()\n self.client.loop.create_task(self.remind())\n self.db = Database()\n self.extractor = Extractor()\n self.message = Message()\n\n async def remind(self):\n await self.client.wait_until_ready()\n while not self.client.is_closed:\n now = datetime.datetime.now().strftime(\"%H:%M:00\")\n results = self.db.fetch_reminders(now)\n for result in results:\n task = result[2]\n message_to_send = \"Reminding @everyone to {0}\".format(task)\n channel = self.client.get_channel(str(result[6]))\n await self.client.send_message(channel, message_to_send)\n await asyncio.sleep(60) # task runs every 60 seconds\n\n def get_server_channel(self, server):\n channel_info = self.db.get_server_channel(server.id)\n if len(channel_info) > 0:\n channel = self.client.get_channel(str(channel_info[0][1]))\n return channel\n return None\n\n def run(self):\n @self.client.event\n async def on_ready():\n print('Logged in as:')\n print(self.client.user.name)\n\n @self.client.event\n async def on_message(message):\n if message.author == self.client.user:\n return\n\n message.content = message.content.lower()\n channel = self.get_server_channel(message.server)\n\n if message.content == '!init':\n message_to_send = self.message.message_for_init(message)\n await self.client.send_message(message.channel, message_to_send)\n return\n\n if channel is None:\n return await self.client.send_message(message.channel, 'Initialize a channel 
using `!init`')\n\n if '!list' in message.content:\n message_to_send = self.message.message_for_list(message.server.id)\n await self.client.send_message(channel, message_to_send)\n\n elif message.content == '!help':\n message_to_send = self.message.message_for_help()\n await self.client.send_message(channel, message_to_send)\n\n elif '!delete' in message.content:\n message_to_send = self.message.message_for_delete(message)\n await self.client.send_message(channel, message_to_send)\n\n elif '!remind' in message.content:\n message_to_send = self.message.message_for_remind(message)\n await self.client.send_message(channel, message_to_send)\n\n self.client.run(TOKEN)\n\nobj = Main()\nobj.run()","repo_name":"Navdevl/Sage","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"74166702985","text":"\"\"\"\n Contains classes for cluster based selectors.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom .nbs_base import NBSBase\nfrom ..utils.data_utils import get_avg_cluster_dissimilarity, get_avg_cluster_dissimilarity_from_file\nimport pandas as pd\nimport numpy as np\nimport time\n\nclass ClusterBasedSelector(NBSBase):\n \"\"\"\n Base class for next batch selectors based on cluster information.\n \"\"\"\n def __init__(self, \n training_loader,\n unlabeled_loader,\n trained_model,\n batch_size=384,\n intra_cluster_dissimilarity_threshold=0.0,\n feature_dist_func=\"tanimoto_dissimilarity\",\n dissimilarity_memmap_filename=None,\n use_consensus_distance=False):\n super(ClusterBasedSelector, self).__init__(training_loader,\n unlabeled_loader,\n trained_model,\n batch_size,\n intra_cluster_dissimilarity_threshold,\n feature_dist_func=feature_dist_func,\n dissimilarity_memmap_filename=dissimilarity_memmap_filename)\n # get clusters now since they are used in many calculations\n self.clusters_train = self.training_loader.get_clusters()\n self.clusters_unlabeled = self.unlabeled_loader.get_clusters()\n \n # keep track of clusters selected already\n self.selected_exploitation_clusters = []\n self.selected_exploration_clusters = []\n \n self.use_consensus_distance = use_consensus_distance\n \n def _get_avg_cluster_dissimilarity(self, selected_cluster_ids, candidate_cluster_ids):\n clusters_train_unlabeled = np.hstack([self.clusters_train, self.clusters_unlabeled])\n \n if self.dissimilarity_memmap_filename is None:\n train_size = self.training_loader.get_size()[0]\n cid_instances = np.in1d(clusters_train_unlabeled, np.hstack([selected_cluster_ids, candidate_cluster_ids]))\n clusters_train_unlabeled = clusters_train_unlabeled[cid_instances]\n \n # read features\n cid_instances_train = cid_instances[:train_size]\n cid_instances_unlabeled = cid_instances[train_size:]\n \n # slice out targeted cluster instances only\n features_train = self.training_loader.get_features(cid_instances_train)\n features_unlabeled = self.unlabeled_loader.get_features(cid_instances_unlabeled)\n if features_train.shape[0] > 0:\n features_train_unlabeled = np.vstack([features_train, features_unlabeled])\n else: # case for empty training set\n features_train_unlabeled = features_unlabeled\n del features_train, features_unlabeled, cid_instances\n \n if self.use_consensus_distance:\n consensus_features = np.zeros(shape=(len(selected_cluster_ids)+len(candidate_cluster_ids), \n features_train_unlabeled.shape[1]))\n 
consensus_clusters = np.zeros(shape=(len(selected_cluster_ids)+len(candidate_cluster_ids),))\n for i, cid in enumerate(np.hstack([selected_cluster_ids, candidate_cluster_ids])):\n cid_instances = np.where(clusters_train_unlabeled == cid)[0]\n cluster_features = features_train_unlabeled[cid_instances,:]\n consensus_features[i,:] = ((np.sum(cluster_features, axis=0) / cluster_features.shape[0]) >= 0.5).astype(float)\n consensus_clusters[i] = cid\n \n features_train_unlabeled = consensus_features\n clusters_train_unlabeled = consensus_clusters\n \n clusters_ordered_ids, avg_cluster_dissimilarity = get_avg_cluster_dissimilarity(clusters_train_unlabeled, \n features_train_unlabeled, \n selected_cluster_ids, \n candidate_cluster_ids,\n feature_dist_func=self.feature_dist_func)\n else:\n idx_ids_train_unlabeled = np.hstack([self.training_loader.get_idx_ids(), \n self.unlabeled_loader.get_idx_ids()])\n sorted_idx_ids = np.argsort(idx_ids_train_unlabeled)\n clusters_train_unlabeled = clusters_train_unlabeled[sorted_idx_ids]\n clusters_ordered_ids, avg_cluster_dissimilarity = get_avg_cluster_dissimilarity_from_file(clusters_train_unlabeled, \n self.dissimilarity_memmap_filename, \n len(clusters_train_unlabeled),\n selected_cluster_ids, \n candidate_cluster_ids)\n \n return clusters_ordered_ids, avg_cluster_dissimilarity\n \n def _get_candidate_exploitation_clusters(self):\n raise NotImplementedError\n \n def _get_candidate_exploration_clusters(self):\n raise NotImplementedError\n \n def _get_candidate_exploitation_instances_total(self, cluster_ids):\n raise NotImplementedError\n \n def _get_candidate_exploration_instances_total(self, cluster_ids):\n exploration_instances_total = np.sum(self._get_candidate_exploration_instances_per_cluster_count(cluster_ids))\n return exploration_instances_total\n \n def _get_candidate_exploration_instances_per_cluster_count(self, cluster_ids):\n clusters_idx = np.in1d(self.clusters_unlabeled, cluster_ids)\n u, candidate_exploration_instances_per_cluster = np.unique(self.clusters_unlabeled[clusters_idx], \n return_counts=True)\n sorted_idx = np.argsort(cluster_ids)\n rev_sorted_idx = np.zeros(len(u), dtype=int)\n rev_sorted_idx[sorted_idx] = np.arange(len(u)) # adapted from: https://stackoverflow.com/a/10831155\n candidate_exploration_instances_per_cluster = candidate_exploration_instances_per_cluster[rev_sorted_idx]\n return candidate_exploration_instances_per_cluster\n \n def _select_instances_from_clusters(self,\n candidate_clusters, \n total_budget,\n useExploitationStrategy=True):\n raise NotImplementedError\n \n \"\"\"\n Selects dissimilar instances from selected_cluster. \n \"\"\"\n def _select_dissimilar_instances_from_single_cluster(self,\n selected_cluster, \n cluster_budget,\n useIntraClusterThreshold=True):\n cluster_instance_idx = np.where(self.clusters_unlabeled == selected_cluster)[0]\n selected_cluster_instances, remaining_cluster_budget = self._select_dissimilar_instances(cluster_instance_idx, \n cluster_budget,\n useIntraClusterThreshold=useIntraClusterThreshold)\n return selected_cluster_instances, remaining_cluster_budget\n \n \"\"\"\n Selects instances from selected_cluster using a probability distribution without replacement. 
\n If instance_proba=None, then selects instances uniformly.\n \"\"\"\n def _select_random_instances_from_single_cluster(self,\n selected_cluster, \n cluster_budget,\n instance_proba=None):\n cluster_instance_idx = np.where(self.clusters_unlabeled == selected_cluster)[0]\n selected_cluster_instances, remaining_cluster_budget = self._select_random_instances(cluster_instance_idx, \n cluster_budget,\n instance_proba=instance_proba)\n return selected_cluster_instances, remaining_cluster_budget\n \n def select_next_batch(self):\n # get qualifying candidate exploitation and exploration clusters\n start_time=time.time()\n candidate_exploitation_clusters = self._get_candidate_exploitation_clusters()\n print(\"Done _get_candidate_exploitation_clusters. Took {} seconds. size: {}\".format(time.time()-start_time, candidate_exploitation_clusters.shape))\n start_time=time.time()\n candidate_exploration_clusters = self._get_candidate_exploration_clusters()\n print(\"Done _get_candidate_exploration_clusters. Took {} seconds. size: {}\".format(time.time()-start_time, candidate_exploration_clusters.shape))\n\n # get exploration and exploitation count estimates\n start_time=time.time()\n candidate_exploitation_instances_total = self._get_candidate_exploitation_instances_total(candidate_exploitation_clusters)\n print(\"Done _get_candidate_exploitation_instances_total: {}. Took {} seconds.\".format(candidate_exploitation_instances_total, time.time()-start_time))\n start_time=time.time()\n candidate_exploration_instances_total = self._get_candidate_exploration_instances_total(candidate_exploration_clusters)\n print(\"Done _get_candidate_exploration_instances_total: {}. Took {} seconds.\".format(candidate_exploration_instances_total, time.time()-start_time)) \n\n # compute budget assigned to exploitation vs exploration\n exploitation_budget = self._get_ee_budget(candidate_exploitation_instances_total, \n candidate_exploration_instances_total)\n\n # start selecting exploitation instances from exploitation clusters\n start_time=time.time()\n selected_exploitation_cluster_instances_pairs = self._select_instances_from_clusters(candidate_exploitation_clusters, \n exploitation_budget, \n useExploitationStrategy=True)\n print(\"Done _select_instances_from_clusters-candidate_exploitation_clusters: {}. Took {} seconds.\".format(sum([len(x[1]) for x in selected_exploitation_cluster_instances_pairs]), \n time.time()-start_time))\n # start selecting exploration instances from exploration clusters\n # update exploration_budget \n start_time=time.time()\n selected_exploitation_clusters = []\n selected_exploitation_instances_count = 0\n if len(selected_exploitation_cluster_instances_pairs) > 0:\n selected_exploitation_clusters = [x[0] for x in selected_exploitation_cluster_instances_pairs]\n selected_exploitation_instances = [x[1] for x in selected_exploitation_cluster_instances_pairs]\n selected_exploitation_instances_count = np.hstack(selected_exploitation_instances).shape[0]\n exploration_budget = self.batch_size - selected_exploitation_instances_count\n update_exploration_clusters = np.setdiff1d(candidate_exploration_clusters, selected_exploitation_clusters)\n candidate_exploration_clusters = update_exploration_clusters\n \n selected_exploration_cluster_instances_pairs = self._select_instances_from_clusters(candidate_exploration_clusters, \n exploration_budget, \n useExploitationStrategy=False)\n print(\"Done _select_instances_from_clusters-candidate_exploration_clusters: {}. 
Took {} seconds.\".format(sum([len(x[1]) for x in selected_exploration_cluster_instances_pairs]),\n time.time()-start_time))\n return (selected_exploitation_cluster_instances_pairs, \n selected_exploration_cluster_instances_pairs)\n \nclass ClusterBasedWCSelector(ClusterBasedSelector):\n \"\"\"\n Selects next batch based on cluster information.\n Weighted-Clusters (WC): Uses a exploitation-exploration weighting scheme to select among clusters, \n and then selects instances within a cluster either based on dissimlarity or randomness. (see params)\n See latest slides for more description.\n \"\"\"\n def __init__(self, \n training_loader,\n unlabeled_loader,\n trained_model,\n batch_size=384,\n intra_cluster_dissimilarity_threshold=0.0,\n feature_dist_func=\"tanimoto_dissimilarity\",\n dissimilarity_memmap_filename=None,\n use_consensus_distance=False,\n uncertainty_method=\"least_confidence\",\n select_dissimilar_instances_within_cluster=True,\n exploitation_use_quantile_for_activity=False,\n exploitation_sample_actives_from_clusters=False,\n exploitation_activity_threshold=0.75,\n exploitation_use_quantile_for_weight=False,\n exploitation_weight_threshold=0.5,\n exploitation_alpha=0.5,\n exploitation_dissimilarity_lambda=0.5,\n use_intra_cluster_threshold_for_exploitation=True,\n use_proportional_cluster_budget_for_exploitation=False,\n exploration_strategy=\"weighted\",\n exploration_use_quantile_for_weight=False,\n exploration_weight_threshold=0.5,\n exploration_beta=0.5,\n exploration_dissimilarity_lambda=0.5,\n use_intra_cluster_threshold_for_exploration=False,\n use_proportional_cluster_budget_for_exploration=True):\n super(ClusterBasedWCSelector, self).__init__(training_loader,\n unlabeled_loader,\n trained_model,\n batch_size,\n intra_cluster_dissimilarity_threshold,\n feature_dist_func=feature_dist_func,\n dissimilarity_memmap_filename=dissimilarity_memmap_filename,\n use_consensus_distance=use_consensus_distance)\n self.select_dissimilar_instances_within_cluster = select_dissimilar_instances_within_cluster\n self.uncertainty_method = uncertainty_method\n self.uncertainty_params_list = None\n if isinstance(uncertainty_method, list):\n self.uncertainty_method = uncertainty_method[0]\n if len(uncertainty_method) > 1:\n self.uncertainty_params_list = [self.feature_dist_func]\n self.uncertainty_params_list += uncertainty_method[1:]\n \n self.exploitation_use_quantile_for_activity = exploitation_use_quantile_for_activity\n self.exploitation_sample_actives_from_clusters = exploitation_sample_actives_from_clusters\n self.exploitation_activity_threshold = exploitation_activity_threshold\n self.exploitation_use_quantile_for_weight = exploitation_use_quantile_for_weight\n self.exploitation_weight_threshold = exploitation_weight_threshold\n self.exploitation_alpha = exploitation_alpha\n self.exploitation_dissimilarity_lambda = exploitation_dissimilarity_lambda\n self.use_intra_cluster_threshold_for_exploitation = use_intra_cluster_threshold_for_exploitation\n self.use_proportional_cluster_budget_for_exploitation = use_proportional_cluster_budget_for_exploitation\n \n self.exploration_strategy = exploration_strategy\n self.exploration_use_quantile_for_weight = exploration_use_quantile_for_weight\n self.exploration_weight_threshold = exploration_weight_threshold\n self.exploration_beta = exploration_beta\n self.exploration_dissimilarity_lambda = exploration_dissimilarity_lambda\n self.use_intra_cluster_threshold_for_exploration = use_intra_cluster_threshold_for_exploration\n 
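        # proportional budgeting (used by the selection methods below) sizes each cluster's draw by its unlabeled count instead of an even split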
self.use_proportional_cluster_budget_for_exploration = use_proportional_cluster_budget_for_exploration\n \n # create pandas df for various cluster calculations\n # creates only cluster IDs for clusters with unlabeled instances \n u_clusters, c_clusters = np.unique(self.clusters_unlabeled,\n return_counts=True)\n self.total_clusters = len(u_clusters)\n self.cluster_cols = ['Cluster ID', 'Cluster Mol Count',\n 'Density', 'Coverage', \n 'Mean Uncertainty', 'Mean Activity Prediction', 'Mean Cost',\n 'High Activity Prediction Count',\n 'Exploitation Weight', 'Exploration Weight']\n self.clusters_df = pd.DataFrame(data=np.nan*np.zeros((self.total_clusters, len(self.cluster_cols))),\n columns=self.cluster_cols)\n self.clusters_df['Cluster ID'] = u_clusters\n self.clusters_df['Cluster Mol Count'] = c_clusters\n self.clusters_df.index = self.clusters_df['Cluster ID']\n \n def _compute_cluster_densities(self):\n total_molecule_count = np.sum(self.clusters_df['Cluster Mol Count'])\n for ci in self.clusters_df['Cluster ID']:\n density_i = self.clusters_df['Cluster Mol Count'].loc[ci] / total_molecule_count\n self.clusters_df.loc[ci, 'Density'] = density_i\n \n def _compute_cluster_coverage(self):\n for ci in self.clusters_df['Cluster ID']:\n cluster_labeled_count = np.sum(self.clusters_train == ci)\n coverage_i = cluster_labeled_count / self.clusters_df['Cluster Mol Count'].loc[ci]\n self.clusters_df.loc[ci, 'Coverage'] = coverage_i\n\n def _compute_cluster_uncertainty(self):\n uncertainty_unlabeled = self.trained_model.get_uncertainty(X=self.unlabeled_loader.get_features(), \n uncertainty_method=self.uncertainty_method,\n uncertainty_params_list=self.uncertainty_params_list)\n for ci in self.clusters_df['Cluster ID']:\n mol_idx = np.where(self.clusters_unlabeled == ci)[0]\n cluster_uncertainty = uncertainty_unlabeled[mol_idx]\n avg_cluster_uncertainty_i = np.nan_to_num(np.mean(cluster_uncertainty))\n self.clusters_df.loc[ci, 'Mean Uncertainty'] = avg_cluster_uncertainty_i\n \n def _compute_cluster_activity_prediction(self):\n preds_unlabeled = self.trained_model.predict(self.unlabeled_loader.get_features())[:,0] # get first task for now. 
TODO: account for multi-task setting?\n for ci in self.clusters_df['Cluster ID']:\n mol_idx = np.where(self.clusters_unlabeled == ci)[0]\n cluster_preds = preds_unlabeled[mol_idx]\n if self.exploitation_use_quantile_for_activity:\n cluster_preds = cluster_preds[cluster_preds >= np.percentile(cluster_preds, 100.0*self.exploitation_activity_threshold)]\n else:\n cluster_preds = cluster_preds[cluster_preds >= self.exploitation_activity_threshold]\n avg_cluster_activity_i = np.nan_to_num(np.mean(cluster_preds))\n self.clusters_df.loc[ci, 'Mean Activity Prediction'] = avg_cluster_activity_i\n self.clusters_df.loc[ci, 'High Activity Prediction Count'] = len(cluster_preds)\n \n def _compute_cluster_cost(self):\n costs_unlabeled = self.unlabeled_loader.get_costs()\n for ci in self.clusters_df['Cluster ID']:\n mol_idx = np.where(self.clusters_unlabeled == ci)[0]\n avg_cluster_cost_i = np.nan_to_num(np.mean(costs_unlabeled[mol_idx]))\n self.clusters_df.loc[ci, 'Mean Cost'] = avg_cluster_cost_i\n \n def _compute_cluster_exploitation_weight(self):\n self.clusters_df['Exploitation Weight'] = (self.exploitation_alpha * self.clusters_df['Mean Activity Prediction']) + \\\n ((1 - self.exploitation_alpha) * self.clusters_df['Coverage'] * self.clusters_df['Density'])\n \n def _compute_cluster_exploration_weight(self):\n if self.exploration_strategy == \"weighted\":\n self.clusters_df['Exploration Weight'] = (self.exploration_beta * self.clusters_df['Mean Uncertainty']) + \\\n ((1 - self.exploration_beta) * (1 - self.clusters_df['Coverage']))\n elif self.exploration_strategy == \"dissimilar\":\n cluster_ids = self.clusters_df['Cluster ID'].values\n _, avg_cluster_dissimilarity = self._get_avg_cluster_dissimilarity(cluster_ids, \n cluster_ids)\n self.clusters_df['Exploration Weight'] = avg_cluster_dissimilarity\n \n def _get_candidate_exploitation_clusters(self):\n if self.exploitation_use_quantile_for_weight:\n qualifying_exploitation_clusters = self.clusters_df['Exploitation Weight'] >= np.percentile(self.clusters_df['Exploitation Weight'], 100.0*self.exploitation_weight_threshold)\n else:\n qualifying_exploitation_clusters = self.clusters_df['Exploitation Weight'] >= self.exploitation_weight_threshold\n candidate_exploitation_clusters = self.clusters_df[qualifying_exploitation_clusters]\n \n # remove exploitation clusters that do not have positive high activity prediction\n if self.exploitation_sample_actives_from_clusters:\n candidate_exploitation_clusters = candidate_exploitation_clusters[candidate_exploitation_clusters['High Activity Prediction Count'] > 0]\n \n candidate_exploitation_clusters = candidate_exploitation_clusters['Cluster ID'].values\n return candidate_exploitation_clusters\n \n def _get_candidate_exploration_clusters(self):\n if self.exploration_strategy == \"weighted\":\n if self.exploration_use_quantile_for_weight:\n qualifying_exploration_clusters = self.clusters_df['Exploration Weight'] >= np.percentile(self.clusters_df['Exploration Weight'], 100.0*self.exploration_weight_threshold)\n else:\n qualifying_exploration_clusters = self.clusters_df['Exploration Weight'] >= self.exploration_weight_threshold\n candidate_exploration_clusters = self.clusters_df[qualifying_exploration_clusters]['Cluster ID'].values\n elif self.exploration_strategy == \"random\":\n candidate_exploration_clusters = self.clusters_df['Cluster ID'].values\n else: # self.exploration_strategy == \"dissimilar\"\n candidate_exploration_clusters = self.clusters_df['Cluster ID'].values\n return 
candidate_exploration_clusters\n \n def _get_candidate_exploitation_instances_total(self, cluster_ids):\n exploitation_instances_total = np.sum(self.clusters_df.loc[cluster_ids, 'High Activity Prediction Count'])\n return exploitation_instances_total\n \n \"\"\"\n Currently not used. \n \"\"\"\n def _adjust_overlapping_clusters(self, candidate_exploitation_clusters, candidate_exploration_clusters):\n exploitation_clusters_to_drop = []\n exploration_clusters_to_drop = []\n overlapping_clusters = np.intersect1d(candidate_exploitation_clusters, candidate_exploration_clusters)\n for ci in overlapping_clusters:\n if self.clusters_df['Exploitation Weight'].loc[ci] >= self.clusters_df['Exploration Weight'].loc[ci]:\n exploration_clusters_to_drop.append(ci)\n else:\n exploitation_clusters_to_drop.append(ci)\n \n candidate_exploitation_clusters = candidate_exploitation_clusters[~candidate_exploitation_clusters.isin(exploitation_clusters_to_drop)]\n candidate_exploration_clusters = candidate_exploration_clusters[~candidate_exploration_clusters.isin(exploration_clusters_to_drop)]\n return candidate_exploitation_clusters, candidate_exploration_clusters\n \n def _select_instances_from_clusters(self,\n candidate_clusters, \n total_budget,\n useExploitationStrategy=True):\n selected_clusters_instances_pairs = None\n if useExploitationStrategy:\n selected_clusters_instances_pairs = self._select_instances_from_clusters_weighted(candidate_clusters, \n total_budget, \n self.exploitation_dissimilarity_lambda,\n weight_column='Exploitation Weight',\n useIntraClusterThreshold=self.use_intra_cluster_threshold_for_exploitation,\n useProportionalClusterBudget=self.use_proportional_cluster_budget_for_exploitation,\n selectDissimilarInstancesWithinCluster=self.select_dissimilar_instances_within_cluster)\n else:\n if self.exploration_strategy == \"random\":\n selected_clusters_instances_pairs = self._select_instances_from_clusters_random(candidate_clusters, \n total_budget,\n useProportionalClusterBudget=self.use_proportional_cluster_budget_for_exploration,\n selectDissimilarInstancesWithinCluster=self.select_dissimilar_instances_within_cluster)\n \n elif self.exploration_strategy == \"dissimilar\":\n selected_clusters_instances_pairs = self._select_instances_from_clusters_dissimilar(candidate_clusters, \n total_budget,\n useProportionalClusterBudget=self.use_proportional_cluster_budget_for_exploration,\n selectDissimilarInstancesWithinCluster=self.select_dissimilar_instances_within_cluster)\n else:\n selected_clusters_instances_pairs = self._select_instances_from_clusters_weighted(candidate_clusters, \n total_budget,\n self.exploration_dissimilarity_lambda,\n weight_column='Exploration Weight',\n useIntraClusterThreshold=self.use_intra_cluster_threshold_for_exploration,\n useProportionalClusterBudget=self.use_proportional_cluster_budget_for_exploration,\n selectDissimilarInstancesWithinCluster=self.select_dissimilar_instances_within_cluster)\n return selected_clusters_instances_pairs\n \n \"\"\"\n Helper method for selecting clusters using weight scheme.\n \"\"\"\n def _select_instances_from_clusters_weighted(self,\n candidate_clusters, \n total_budget,\n dissimilarity_lambda,\n weight_column='Exploitation Weight',\n useIntraClusterThreshold=True,\n useProportionalClusterBudget=False,\n selectDissimilarInstancesWithinCluster=True):\n selected_clusters_instances_pairs = []\n if len(candidate_clusters) == 0:\n return selected_clusters_instances_pairs\n \n curr_cluster_budget = 0\n curr_cluster_budget = 
np.nan_to_num(np.ceil(total_budget / len(candidate_clusters)))\n if useProportionalClusterBudget:\n cluster_unlabeled_counts = self._get_candidate_exploration_instances_per_cluster_count(candidate_clusters)\n total_unlabeled_counts = np.sum(cluster_unlabeled_counts)\n \n if weight_column == 'Exploitation Weight':\n preds_unlabeled = self.trained_model.predict(self.unlabeled_loader.get_features())[:,0]\n \n remaining_total_budget = total_budget\n if remaining_total_budget > 0:\n # select highest weighted cluster first\n cluster_weights = self.clusters_df[weight_column].loc[candidate_clusters].values\n curr_selected_cluster_idx = np.argsort(cluster_weights)[::-1][0]\n curr_selected_cluster = candidate_clusters[curr_selected_cluster_idx]\n \n if useProportionalClusterBudget:\n curr_cluster_budget = np.ceil(total_budget * (cluster_unlabeled_counts[curr_selected_cluster_idx] / total_unlabeled_counts))\n curr_cluster_budget = min(remaining_total_budget, curr_cluster_budget)\n \n cluster_instance_idx = np.where(self.clusters_unlabeled == curr_selected_cluster)[0]\n if weight_column == 'Exploitation Weight' and self.exploitation_sample_actives_from_clusters:\n cluster_preds = preds_unlabeled[cluster_instance_idx]\n if self.exploitation_use_quantile_for_activity:\n cluster_instance_idx = cluster_instance_idx[cluster_preds >= np.percentile(cluster_preds, 100.0*self.exploitation_activity_threshold)]\n else:\n cluster_instance_idx = cluster_instance_idx[cluster_preds >= self.exploitation_activity_threshold]\n if selectDissimilarInstancesWithinCluster:\n selected_instances_cluster, remaining_cluster_budget = self._select_dissimilar_instances(cluster_instance_idx, \n curr_cluster_budget,\n useIntraClusterThreshold=useIntraClusterThreshold)\n else:\n selected_instances_cluster, remaining_cluster_budget = self._select_random_instances(cluster_instance_idx, \n curr_cluster_budget)\n \n selected_clusters_instances_pairs.append((curr_selected_cluster,))\n selected_clusters_instances_pairs[-1] = selected_clusters_instances_pairs[-1] + (selected_instances_cluster,)\n remaining_total_budget -= len(selected_instances_cluster)\n \n rem_candidate_clusters = np.ones_like(candidate_clusters).astype(bool)\n rem_candidate_clusters[curr_selected_cluster_idx] = False\n prev_sum_cluster_dissimilarity = np.zeros_like(candidate_clusters)\n \n # select remaining clusters based on what was already selected\n i=1\n while i < len(candidate_clusters) and remaining_total_budget > 0:\n start_time1 = time.time()\n last_selected_cluster = selected_clusters_instances_pairs[-1][0]\n _, avg_cluster_dissimilarity = self._get_avg_cluster_dissimilarity([last_selected_cluster], \n candidate_clusters[rem_candidate_clusters])\n avg_cluster_dissimilarity = (avg_cluster_dissimilarity + prev_sum_cluster_dissimilarity[rem_candidate_clusters]) / len(selected_clusters_instances_pairs)\n \n cluster_weights = self.clusters_df[weight_column].loc[candidate_clusters[rem_candidate_clusters]].values\n # adjust cluster weights to include avg cluster dissimilarity\n adjusted_cluster_weights = dissimilarity_lambda * avg_cluster_dissimilarity + \\\n ((1 - dissimilarity_lambda) * cluster_weights)\n \n highest_w_idx = np.argsort(adjusted_cluster_weights)[::-1][0]\n curr_selected_cluster = candidate_clusters[rem_candidate_clusters][highest_w_idx]\n curr_selected_cluster_idx = np.where(candidate_clusters == curr_selected_cluster)[0]\n rem_candidate_clusters[curr_selected_cluster_idx] = False\n \n # process current cluster budget\n if 
useProportionalClusterBudget:\n curr_cluster_budget = np.ceil(total_budget * (cluster_unlabeled_counts[curr_selected_cluster_idx] / total_unlabeled_counts)) \n curr_cluster_budget = curr_cluster_budget + remaining_cluster_budget\n curr_cluster_budget = min(remaining_total_budget, curr_cluster_budget)\n \n cluster_instance_idx = np.where(self.clusters_unlabeled == curr_selected_cluster)[0]\n if weight_column == 'Exploitation Weight' and self.exploitation_sample_actives_from_clusters:\n cluster_preds = preds_unlabeled[cluster_instance_idx]\n if self.exploitation_use_quantile_for_activity:\n cluster_instance_idx = cluster_instance_idx[cluster_preds >= np.percentile(cluster_preds, 100.0*self.exploitation_activity_threshold)]\n else:\n cluster_instance_idx = cluster_instance_idx[cluster_preds >= self.exploitation_activity_threshold]\n if selectDissimilarInstancesWithinCluster:\n selected_instances_cluster, remaining_cluster_budget = self._select_dissimilar_instances(cluster_instance_idx, \n curr_cluster_budget,\n useIntraClusterThreshold=useIntraClusterThreshold)\n else:\n selected_instances_cluster, remaining_cluster_budget = self._select_random_instances(cluster_instance_idx, \n curr_cluster_budget)\n \n prev_sum_cluster_dissimilarity[rem_candidate_clusters] = np.delete(avg_cluster_dissimilarity, highest_w_idx) * len(selected_clusters_instances_pairs)\n \n selected_clusters_instances_pairs.append((curr_selected_cluster,))\n selected_clusters_instances_pairs[-1] = selected_clusters_instances_pairs[-1] + (selected_instances_cluster,)\n remaining_total_budget -= len(selected_instances_cluster)\n \n i+=1\n \n return selected_clusters_instances_pairs\n \n \"\"\"\n Helper method for selecting clusters in a random manner.\n \"\"\"\n def _select_instances_from_clusters_random(self,\n candidate_clusters, \n total_budget,\n useProportionalClusterBudget=False,\n selectDissimilarInstancesWithinCluster=True):\n selected_clusters_instances_pairs = []\n curr_cluster_budget = 0\n if len(candidate_clusters) != 0:\n curr_cluster_budget = np.ceil(total_budget / len(candidate_clusters))\n if useProportionalClusterBudget:\n cluster_unlabeled_counts = self._get_candidate_exploration_instances_per_cluster_count(candidate_clusters)\n total_unlabeled_counts = np.sum(cluster_unlabeled_counts)\n \n remaining_total_budget = total_budget\n i=0\n rem_clusters_idx = list(np.arange(len(candidate_clusters)))\n remaining_cluster_budget = 0\n while i < len(candidate_clusters) and remaining_total_budget > 0:\n curr_selected_cluster_idx = np.random.choice(rem_clusters_idx, size=1, replace=False)[0]\n rem_clusters_idx.remove(curr_selected_cluster_idx)\n curr_selected_cluster = candidate_clusters[curr_selected_cluster_idx]\n \n # process current cluster budget\n if useProportionalClusterBudget:\n curr_cluster_budget = np.ceil(total_budget * (cluster_unlabeled_counts[curr_selected_cluster_idx] / total_unlabeled_counts)) \n curr_cluster_budget = curr_cluster_budget + remaining_cluster_budget\n curr_cluster_budget = min(remaining_total_budget, curr_cluster_budget)\n \n cluster_instance_idx = np.where(self.clusters_unlabeled == curr_selected_cluster)[0]\n if selectDissimilarInstancesWithinCluster:\n selected_instances_cluster, remaining_cluster_budget = self._select_dissimilar_instances(cluster_instance_idx, \n curr_cluster_budget,\n useIntraClusterThreshold=False)\n else:\n selected_instances_cluster, remaining_cluster_budget = self._select_random_instances(cluster_instance_idx, \n curr_cluster_budget)\n 
selected_clusters_instances_pairs.append((curr_selected_cluster,))\n selected_clusters_instances_pairs[-1] = selected_clusters_instances_pairs[-1] + (selected_instances_cluster,)\n remaining_total_budget -= len(selected_instances_cluster)\n i+=1\n \n return selected_clusters_instances_pairs\n\n \"\"\"\n Helper method for selecting clusters in a dissimilar manner.\n \"\"\"\n def _select_instances_from_clusters_dissimilar(self,\n candidate_clusters, \n total_budget,\n useProportionalClusterBudget=False,\n selectDissimilarInstancesWithinCluster=True):\n selected_clusters_instances_pairs = []\n curr_cluster_budget = 0\n if len(candidate_clusters) != 0:\n curr_cluster_budget = np.ceil(total_budget / len(candidate_clusters))\n if useProportionalClusterBudget:\n cluster_unlabeled_counts = self._get_candidate_exploration_instances_per_cluster_count(candidate_clusters)\n total_unlabeled_counts = np.sum(cluster_unlabeled_counts)\n \n remaining_total_budget = total_budget\n rem_clusters_idx = list(np.arange(len(candidate_clusters)))\n remaining_cluster_budget = 0\n # select first cluster randomly\n if remaining_total_budget > 0:\n curr_selected_cluster_idx = np.random.choice(rem_clusters_idx, size=1, replace=False)[0]\n rem_clusters_idx.remove(curr_selected_cluster_idx)\n curr_selected_cluster = candidate_clusters[curr_selected_cluster_idx]\n \n # process current cluster budget\n if useProportionalClusterBudget:\n curr_cluster_budget = np.ceil(total_budget * (cluster_unlabeled_counts[curr_selected_cluster_idx] / total_unlabeled_counts)) \n curr_cluster_budget = curr_cluster_budget + remaining_cluster_budget\n curr_cluster_budget = min(remaining_total_budget, curr_cluster_budget)\n \n cluster_instance_idx = np.where(self.clusters_unlabeled == curr_selected_cluster)[0]\n if selectDissimilarInstancesWithinCluster:\n selected_instances_cluster, remaining_cluster_budget = self._select_dissimilar_instances(cluster_instance_idx, \n curr_cluster_budget,\n useIntraClusterThreshold=False)\n else:\n selected_instances_cluster, remaining_cluster_budget = self._select_random_instances(cluster_instance_idx, \n curr_cluster_budget)\n selected_clusters_instances_pairs.append((curr_selected_cluster,))\n selected_clusters_instances_pairs[-1] = selected_clusters_instances_pairs[-1] + (selected_instances_cluster,)\n remaining_total_budget -= len(selected_instances_cluster)\n \n # select remaining clusters so that they are dissimilar to clusters already selected\n prev_sum_cluster_dissimilarity = np.zeros_like(candidate_clusters)\n rem_candidate_clusters = np.ones_like(candidate_clusters).astype(bool)\n i=1\n while i < len(candidate_clusters) and remaining_total_budget > 0:\n last_selected_cluster = selected_clusters_instances_pairs[-1][0]\n _, avg_cluster_dissimilarity = self._get_avg_cluster_dissimilarity([last_selected_cluster], \n candidate_clusters[rem_candidate_clusters])\n avg_cluster_dissimilarity = (avg_cluster_dissimilarity + prev_sum_cluster_dissimilarity[rem_candidate_clusters]) / len(selected_clusters_instances_pairs)\n \n highest_w_idx = np.argsort(avg_cluster_dissimilarity)[::-1][0]\n curr_selected_cluster = candidate_clusters[rem_candidate_clusters][highest_w_idx]\n curr_selected_cluster_idx = np.where(candidate_clusters == curr_selected_cluster)[0]\n rem_candidate_clusters[curr_selected_cluster_idx] = False\n \n # process current cluster budget\n if useProportionalClusterBudget:\n curr_cluster_budget = np.ceil(total_budget * (cluster_unlabeled_counts[curr_selected_cluster_idx] / total_unlabeled_counts)) 
\n                    curr_cluster_budget = curr_cluster_budget + remaining_cluster_budget\n                    curr_cluster_budget = min(remaining_total_budget, curr_cluster_budget)\n                \n                cluster_instance_idx = np.where(self.clusters_unlabeled == curr_selected_cluster)[0]\n                if selectDissimilarInstancesWithinCluster:\n                    # fixed: this method has no useIntraClusterThreshold parameter; use False as in the first-cluster branch above\n                    selected_instances_cluster, remaining_cluster_budget = self._select_dissimilar_instances(cluster_instance_idx, \n                                                                                                             curr_cluster_budget,\n                                                                                                             useIntraClusterThreshold=False)\n                else:\n                    selected_instances_cluster, remaining_cluster_budget = self._select_random_instances(cluster_instance_idx, \n                                                                                                         curr_cluster_budget)\n                \n                prev_sum_cluster_dissimilarity[rem_candidate_clusters] = np.delete(avg_cluster_dissimilarity, highest_w_idx) * len(selected_clusters_instances_pairs)\n                \n                selected_clusters_instances_pairs.append((curr_selected_cluster,))\n                selected_clusters_instances_pairs[-1] = selected_clusters_instances_pairs[-1] + (selected_instances_cluster,)\n                remaining_total_budget -= len(selected_instances_cluster)\n                \n                i+=1\n        \n        return selected_clusters_instances_pairs\n    \n    def select_next_batch(self):\n        start_time = time.time()\n        # populate self.clusters_df\n        self._compute_cluster_densities()\n        self._compute_cluster_coverage()\n        self._compute_cluster_uncertainty()\n        self._compute_cluster_activity_prediction()\n        self._compute_cluster_cost()\n        \n        # compute cluster exploitation and exploration weights\n        self._compute_cluster_exploitation_weight()\n        self._compute_cluster_exploration_weight()\n        print("Done computing cluster properties. Took {} seconds.".format(time.time()-start_time))\n        \n        selected_exploitation_cluster_instances_pairs, selected_exploration_cluster_instances_pairs = super(ClusterBasedWCSelector, self).select_next_batch()\n        \n        # account for case when we have more room in the budget \n        # remaining budget allocated toward exploration by just picking the top-ranked unselected clusters by exploration weight\n        selected_exploitation_clusters = []\n        selected_exploitation_instances_count = 0\n        selected_exploration_clusters = []\n        selected_exploration_instances_count = 0\n        if len(selected_exploitation_cluster_instances_pairs) > 0:\n            selected_exploitation_clusters = [x[0] for x in selected_exploitation_cluster_instances_pairs]\n            selected_exploitation_instances = [x[1] for x in selected_exploitation_cluster_instances_pairs]\n            selected_exploitation_instances_count = np.hstack(selected_exploitation_instances).shape[0]\n        \n        if len(selected_exploration_cluster_instances_pairs) > 0:\n            selected_exploration_clusters = [x[0] for x in selected_exploration_cluster_instances_pairs]\n            selected_exploration_instances = [x[1] for x in selected_exploration_cluster_instances_pairs]\n            selected_exploration_instances_count = np.hstack(selected_exploration_instances).shape[0]\n        exploration_budget = self.batch_size - (selected_exploitation_instances_count + selected_exploration_instances_count)\n        \n        if exploration_budget > 0 and self.exploration_strategy == "weighted":\n            candidate_exploration_clusters = np.setdiff1d(self.clusters_df['Cluster ID'].values, \n                                                          np.union1d(selected_exploitation_clusters, selected_exploration_clusters))\n            \n            candidate_exploration_clusters_weights = self.clusters_df['Exploration Weight'].loc[candidate_exploration_clusters].values\n            top_k_exploration_clusters = np.argsort(candidate_exploration_clusters_weights)[::-1][:exploration_budget]\n            candidate_exploration_clusters = candidate_exploration_clusters[top_k_exploration_clusters]\n            \n            remaining_selected_exploration_cluster_instances_pairs = 
self._select_instances_from_clusters(candidate_exploration_clusters, \n exploration_budget, \n useExploitationStrategy=False)\n selected_exploration_cluster_instances_pairs.extend(remaining_selected_exploration_cluster_instances_pairs)\n \n return (selected_exploitation_cluster_instances_pairs, \n selected_exploration_cluster_instances_pairs)","repo_name":"gitter-lab/active-learning-drug-discovery","sub_path":"active_learning_dd/next_batch_selector/cluster_based_selector.py","file_name":"cluster_based_selector.py","file_ext":"py","file_size_in_byte":49409,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"22437475010","text":"#!/usr/bin/env python3\n\"\"\"Inception block\"\"\"\n\n\nimport tensorflow.keras as K\n\n\ndef transition_layer(X, nb_filters, compression):\n \"\"\"Output of trans layer, number of filters\"\"\"\n init = K.initializers.he_normal()\n fil = int(nb_filters * compression)\n\n batch_norm = K.layers.BatchNormalization()(X)\n act = K.layers.Activation('relu')(batch_norm)\n conv = K.layers.Conv2D(filters=fil, kernel_size=1,\n padding='same', strides=1,\n kernel_initializer=init)(act)\n\n avg_pool = K.layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2),\n padding='same')(conv)\n\n return avg_pool, fil\n","repo_name":"faspen/holbertonschool-machine_learning","sub_path":"supervised_learning/0x08-deep_cnns/6-transition_layer.py","file_name":"6-transition_layer.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35835337607","text":"\nimport asyncio\nfrom rtcbot import Microphone, Speaker\n\nmicrophone = Microphone()\nspeaker = Speaker()\n\n\n@microphone.subscribe\ndef onData(data):\n data = data * 5\n if speaker.ready:\n speaker.put_nowait(data)\n\n\ntry:\n asyncio.get_event_loop().run_forever()\nfinally:\n microphone.close()\n speaker.close()\n","repo_name":"dkumor/rtcbot","sub_path":"examples/misc/audiolouder.py","file_name":"audiolouder.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"81"} +{"seq_id":"8494804286","text":"def enlarge(image):\r\n result = [[]]\r\n for index, line in enumerate(image):\r\n if index:\r\n result.append([])\r\n for elem in line:\r\n result[index].append(elem*2)\r\n result[index] = ''.join(result[index])\r\n for line in range(len(result)):\r\n result.append(result.pop(0))\r\n result.append(result[-1])\r\n return '\\n'.join(result)\r\n\r\nprint(enlarge(['@']))\r\nprint(enlarge(['****',\r\n '* *',\r\n '* *',\r\n '****']))\r\n","repo_name":"Oleganin/Exercise","sub_path":"enlarge.py","file_name":"enlarge.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2882049352","text":"from flask import Flask, request\n\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef getpost():\n qdata_msg = f'Query-data: \"{request.args.get(\"q\", \"No data provided\")}\"\\n'\n bdata_msg = f'Body-data: \"{request.form.get(\"title\", \"No data provided\")}\"'\n return qdata_msg + bdata_msg\n\n\n@app.route(\"/abra\", methods=[\"ABRAKADABRA\"])\ndef abrakadabra():\n \"\"\"Custom HTTP method\"\"\"\n return \"Response for Abrakadabra-method!\"\n\n\n@app.route(\"/getb\", methods=[\"GET\"])\ndef getbody():\n \"\"\"GET-Request with body\"\"\"\n return f'Body-data: \"{request.form.get(\"title\", \"No 
data provided\")}\"'\n\n\n@app.route(\"/postq\", methods=[\"POST\"])\ndef postq():\n \"\"\"POST-Request with query-parameters\"\"\"\n data_msg = (\n f'Query-data: query is \"{request.args.get(\"q\", \"No data provided\")}\", '\n f'page is \"{request.args.get(\"p\", \"No data provided\")}\"'\n )\n return data_msg\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"peskovdev/broken-http","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24921074084","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mar 12 17:47 2019\n\n@author: phongdk\n\"\"\"\n\nimport os\nimport glob\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport pickle\nimport sys\nfrom tqdm import tqdm\nfrom datetime import datetime\nimport logging\n\nsys.path.append('src/python/utils')\nsys.path.append('src/python/model')\nsys.path.append('src/python/db')\n# from utils import normalize_topic_distribution\nimport warnings\nfrom redisConnection import connectRedis, get_browser_id\nwarnings.filterwarnings(\"ignore\")\n\nOPTIMAL_THRESHOLD_FILENAME = 'Optimal_threshold.txt'\nBALANCE_THRESHOLD = 0.5\nGAP_INVENTORY = 0.05\nNUM_THREADS = 32\n\n\ndef convert_hashID_to_browser_id(df):\n LOGGER.info(\"Shape before convert HashId {}\".format(df.shape))\n r = connectRedis()\n df[\"browser_id\"] = df[\"user_id\"].apply(lambda x: get_browser_id(r, x))\n df.dropna(subset=[\"browser_id\"], inplace=True)\n LOGGER.info(\"Shape before convert HashId {}\".format(df.shape))\n return df\n\n\ndef prediction_stage(filename, path, target_label=1):\n LOGGER.info('-------------------- Load Test Data. ----------------------------')\n newest_model = sorted(os.listdir(directory), reverse=True)[0] # find the newest model to predict\n lgb_models = sorted(glob.glob(os.path.join(path, newest_model, \"*fold_[0-4].pkl\")))\n\n LOGGER.info('\\nMake predictions...\\n')\n models = [pickle.load(open(m_name, 'rb')) for m_name in lgb_models]\n lgb_result = []\n list_userid = []\n for chunk in tqdm(pd.read_csv(filename, compression='gzip', chunksize=chunk_size, index_col='user_id',\n dtype={'user_id': str})):\n LOGGER.info(chunk.shape)\n list_userid.extend(list(chunk.index))\n if \"gender\" in chunk.columns:\n chunk.drop(columns=['gender', 'age_group'], inplace=True)\n\n chunk_result = [model.predict_proba(chunk, num_threads=NUM_THREADS) for model in models] # prediction for each chunk\n chunk_result = np.array(chunk_result).mean(axis=0)\n\n lgb_result.extend(chunk_result)\n\n lgb_result = np.array(lgb_result, dtype=np.float16)\n\n if lgb_result.shape[1] == 2: # binary classification\n with open(os.path.join(directory, newest_model, OPTIMAL_THRESHOLD_FILENAME), 'r') as f:\n optimal_threshold = float(f.read()) if is_best_threshold else BALANCE_THRESHOLD\n LOGGER.info(\"------------------Using threshold --------- : {}\".format(optimal_threshold))\n final_result = np.array(lgb_result[:, 1] > optimal_threshold, dtype=np.int16)\n else:\n final_result = np.argmax(lgb_result, axis=1)\n\n df = pd.DataFrame({'user_id': list_userid, 'target': final_result})\n df = df[df['target'] == target_label]\n df = convert_hashID_to_browser_id(df)\n df['category_id'] = cate_id\n LOGGER.info(df.head())\n\n if cate_id2:\n final_result = np.array(lgb_result[:, 1] > optimal_threshold - GAP_INVENTORY, dtype=np.int16)\n df2 = pd.DataFrame({'user_id': list_userid, 'target': final_result})\n df2 = 
df2[df2['target'] == target_label]\n df2 = convert_hashID_to_browser_id(df2)\n df2['category_id'] = cate_id2\n df = pd.concat([df, df2])\n\n df[['browser_id', 'category_id']].to_csv(output_filename, compression='gzip', index=False, header=None, sep=' ')\n LOGGER.info(output_filename)\n\n\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-md\", \"--model_directory\", required=True, help=\"path to model directory\")\n ap.add_argument(\"-q\", \"--test\", required=True, help=\"path to testing file\")\n ap.add_argument(\"-o\", \"--output\", required=True, help=\"path to output file\")\n ap.add_argument(\"-cs\", \"--chunk_size\", required=False, nargs='?',\n help=\"chunk size for reading and processing a large file\", type=int, default=500000)\n ap.add_argument(\"-l\", \"--log_file\", required=False, help=\"path to log file\")\n ap.add_argument(\"-bt\", \"--best_threshold\", required=False, help=\"path to log file\", type=bool, default=True)\n ap.add_argument(\"-cid\", \"--cate_id\", required=True, help=\"Category ID\", type=int)\n ap.add_argument(\"-cid2\", \"--cate_id2\", required=False,\n help=\"Category ID 2 (if wanna more inventory)\", type=int)\n\n args = vars(ap.parse_args())\n directory = args['model_directory']\n test_filename = args['test']\n chunk_size = args['chunk_size']\n is_best_threshold = args['best_threshold']\n cate_id = args['cate_id']\n cate_id2 = args['cate_id2']\n\n output_filename = args['output'] if args['output'].endswith('.gz') else '{}.csv.gz'.format(args['output'])\n\n if args['log_file'] is not None:\n log_filename = args['log_file']\n else:\n LOG_DIR = \"logs\"\n if not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\n log_filename = os.path.join(LOG_DIR, datetime.today().strftime(\"%Y-%m-%d.log\"))\n\n logging.basicConfig(level=logging.INFO, filename=log_filename)\n LOGGER = logging.getLogger(\"main\")\n\n LOGGER.info('----' * 20 + \"{}\".format(datetime.today()))\n LOGGER.info(args)\n\n prediction_stage(test_filename, directory)\n","repo_name":"phongdk92/customTargeting","sub_path":"src/python/main/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2911187432","text":"# 백준에서 python3 대신 pypy3로 제출하니까 시간 초과가 뜨지 않고 성공.\nimport sys\n\ndef nQueen(start, n):\n # 함수 외부에서 지정한 변수 count에 계산을 실행하기 위해 전역 변수로 선언\n global count\n if start == n:\n count += 1\n return\n for i in range(n):\n if ch1[i] or ch2[n-i+start-1] or ch3[i+start]: continue\n ch1[i] = ch2[n-i+start-1] = ch3[i+start] = True\n nQueen(start+1, n)\n ch1[i] = ch2[n-i+start-1] = ch3[i+start] = False\n\nN, count = int(sys.stdin.readline()), 0\n# 세로, 대각선(\\,/)에 따라 가능 유무 판단\nch1, ch2, ch3 = [False] * N, [False] * (2*N-1), [False] * (2*N-1)\nnQueen(0, N)\nprint(count)","repo_name":"sjan137/solving_algorithm_problems","sub_path":"back_tracking/prob_05_N-Queen.py","file_name":"prob_05_N-Queen.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3503059979","text":"from copy import copy\n\nfrom .__external__ import FunctionWrapper\n\n\nclass Advice:\n def get_proxy(self, function):\n runner = self._get_runner(function)\n\n return FunctionWrapper(function, runner)\n\n def _get_runner(self, function):\n runtime_factory = self._get_runtime\n\n def runner(*args, **kwargs):\n runtime = runtime_factory(args, kwargs)\n\n runtime.before()\n\n try:\n result = 
function(*args, **kwargs)\n            except BaseException as error:\n                # Copy to prevent changes in error.traceback\n                runtime.throw(copy(error))\n\n                raise error\n            else:\n                runtime.after(result)\n\n            return result\n\n        return runner\n\n    def _get_runtime(self, args, kwargs):\n        raise NotImplementedError\n","repo_name":"timka-s/aspect-python","sub_path":"src/advice/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16782565164","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# Format into the form 2016-03-20 11:45:39\n# print time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) \nimport rospy\nimport subprocess\nimport os\nimport time \nfrom std_msgs.msg import String\n\nclass RosLogger:\n\n    def __init__(self, debug_mode=False):\n        self.debug_mode = debug_mode\n        self.log_status_publisher = rospy.Publisher('/gauss/gaus_node/gauss_commander_log', String, queue_size=1)\n\n    def publish_log_status(self, level, data):\n        if self.debug_mode:\n            msg = String()\n            now_time = time.strftime(\"%Y-%m-%d %H:%M:%S \", time.localtime())\n            msg.data = \"gauss_commander \" + level + \" \"+ now_time + data \n            self.log_status_publisher.publish(msg)\n\nif __name__ == '__main__':\n    rospy.init_node('gauss_ros_logger')\n    rl = RosLogger(debug_mode = True)\n    while True:\n        # fixed: publish_log_status takes (level, data); the one-argument call raised a TypeError\n        rl.publish_log_status(\"INFO\", \"test\")\n    rospy.spin()\n\n","repo_name":"TonyRobotics/gauss","sub_path":"gauss_commander/src/gauss_commander/gauss_ros_logger.py","file_name":"gauss_ros_logger.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"81"} +{"seq_id":"74822363466","text":"class Solution:\n    # @param {integer[]} nums\n    # @return {integer[][]}\n    def subsetsWithDup(self, nums):\n        result = [[]]\n        nums.sort()\n        \n        for each in nums:\n            count = len(result)\n            for idx in range(count):\n                elem = result[idx][:]\n                elem.append(each)\n                if elem not in result:\n                    result.append(elem)\n        \n        return result","repo_name":"AlgoLINE/algoLINE","sub_path":"14_August_2nd/gyeongwook/subsetsWithDup.py","file_name":"subsetsWithDup.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13980822573","text":"\"\"\"Command line functions for adapting acoustic models to new data\"\"\"\nfrom __future__ import annotations\n\nimport os\nfrom pathlib import Path\n\nimport rich_click as click\n\nfrom montreal_forced_aligner import config\nfrom montreal_forced_aligner.alignment import AdaptingAligner\nfrom montreal_forced_aligner.command_line.utils import (\n    common_options,\n    validate_acoustic_model,\n    validate_dictionary,\n)\n\n__all__ = [\"adapt_model_cli\"]\n\n\n@click.command(\n    name=\"adapt\",\n    context_settings=dict(\n        ignore_unknown_options=True,\n        allow_extra_args=True,\n        allow_interspersed_args=True,\n    ),\n    short_help=\"Adapt an acoustic model\",\n)\n@click.argument(\n    \"corpus_directory\",\n    type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path),\n)\n@click.argument(\"dictionary_path\", type=click.UNPROCESSED, callback=validate_dictionary)\n@click.argument(\"acoustic_model_path\", type=click.UNPROCESSED, callback=validate_acoustic_model)\n@click.argument(\n    \"output_model_path\", type=click.Path(file_okay=True, dir_okay=False, path_type=Path)\n)\n@click.option(\n    \"--output_directory\",\n    help=\"Path to save alignments.\",\n    type=click.Path(file_okay=False, 
dir_okay=True, path_type=Path),\n)\n@click.option(\n \"--config_path\",\n \"-c\",\n help=\"Path to config file to use for training.\",\n type=click.Path(exists=True, file_okay=True, dir_okay=False, path_type=Path),\n)\n@click.option(\n \"--speaker_characters\",\n \"-s\",\n help=\"Number of characters of file names to use for determining speaker, \"\n \"default is to use directory names.\",\n type=str,\n default=\"0\",\n)\n@click.option(\n \"--audio_directory\",\n \"-a\",\n help=\"Audio directory root to use for finding audio files.\",\n type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path),\n)\n@click.option(\n \"--output_format\",\n help=\"Format for aligned output files (default is long_textgrid).\",\n default=\"long_textgrid\",\n type=click.Choice([\"long_textgrid\", \"short_textgrid\", \"json\", \"csv\"]),\n)\n@click.option(\n \"--include_original_text\",\n is_flag=True,\n help=\"Flag to include original utterance text in the output.\",\n default=False,\n)\n@common_options\n@click.help_option(\"-h\", \"--help\")\n@click.pass_context\ndef adapt_model_cli(context, **kwargs) -> None:\n \"\"\"\n Adapt an acoustic model to a new corpus.\n \"\"\"\n if kwargs.get(\"profile\", None) is not None:\n os.environ[config.MFA_PROFILE_VARIABLE] = kwargs.pop(\"profile\")\n config.update_configuration(kwargs)\n config_path = kwargs.get(\"config_path\", None)\n output_directory = kwargs.get(\"output_directory\", None)\n output_model_path = kwargs.get(\"output_model_path\", None)\n corpus_directory = kwargs[\"corpus_directory\"].absolute()\n dictionary_path = kwargs[\"dictionary_path\"]\n acoustic_model_path = kwargs[\"acoustic_model_path\"]\n output_format = kwargs[\"output_format\"]\n include_original_text = kwargs[\"include_original_text\"]\n adapter = AdaptingAligner(\n corpus_directory=corpus_directory,\n dictionary_path=dictionary_path,\n acoustic_model_path=acoustic_model_path,\n **AdaptingAligner.parse_parameters(config_path, context.params, context.args),\n )\n\n try:\n adapter.adapt()\n if output_directory is not None:\n os.makedirs(output_directory, exist_ok=True)\n adapter.align()\n adapter.analyze_alignments()\n adapter.export_files(\n output_directory,\n output_format,\n include_original_text=include_original_text,\n )\n if output_model_path is not None:\n adapter.export_model(output_model_path)\n except Exception:\n adapter.dirty = True\n raise\n finally:\n adapter.cleanup()\n","repo_name":"MontrealCorpusTools/Montreal-Forced-Aligner","sub_path":"montreal_forced_aligner/command_line/adapt.py","file_name":"adapt.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":1099,"dataset":"github-code","pt":"81"} +{"seq_id":"25617272984","text":"import torch\nimport torch.nn.functional as F\n\ndef euler2mat(angle):\n \"\"\"Convert euler angles to rotation matrix.\n Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174\n Args:\n angle: rotation angle along 3 axis (in radians) -- size = [B, 3]\n Returns:\n Rotation matrix corresponding to the euler angles -- size = [B, 3, 3]\n \"\"\"\n B = angle.size(0)\n x, y, z = angle[:,0], angle[:,1], angle[:,2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach()*0\n ones = zeros.detach()+1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).reshape(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], 
dim=1).reshape(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).reshape(B, 3, 3)\n\n rotMat = xmat @ ymat @ zmat\n return rotMat\n\ndef quat2mat(quat):\n \"\"\"Convert quaternion coefficients to rotation matrix.\n Args:\n quat: first three coeff of quaternion of rotation. fourht is then computed to have a norm of 1 -- size = [B, 3]\n Returns:\n Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]\n \"\"\"\n norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1)\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)\n return rotMat\n\n\ndef pose_vec2mat(vec, rotation_mode='euler'):\n \"\"\"\n Convert 6DoF parameters to transformation matrix.\n Args:s\n vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]\n Returns:\n A transformation matrix -- [B, 3, 4]\n \"\"\"\n translation = vec[:, 3:].unsqueeze(-1) # [B, 3, 1]\n rot = vec[:,:3]\n if rotation_mode == 'euler':\n rot_mat = euler2mat(rot) # [B, 3, 3]\n elif rotation_mode == 'quat':\n rot_mat = quat2mat(rot) # [B, 3, 3]\n transform_mat = torch.cat([rot_mat, translation], dim=2) # [B, 3, 4]\n return transform_mat\n\ndef inv_pose_vec(transform_mat, pt):\n # pt: B * 8 * 3 (X, Y, Z)\n # vec: B * 6 (Rx, Ry, Rz, Tx, Ty, Tz)\n pt = pt - transform_mat[:, :3, 3].unsqueeze(1).repeat(1,8,1)\n rot_Mat = transform_mat[:, :3, :3]\n inv_rotMat = torch.inverse(rot_Mat)\n inv_pt = pt.bmm(inv_rotMat)\n\n return inv_pt\n\ndef raydist_range(transform_mat, pt, src):\n inv_pt = inv_pose_vec(transform_mat, pt)\n #inv_pt = inv_pt.squeeze(0)\n inv_pt[:,:,2] = src - inv_pt[:,:,2]\n inv_pt = inv_pt.view(-1, 3)\n dist_pt = torch.sqrt(torch.mul(inv_pt[:,0], inv_pt[:,0]) + torch.mul(inv_pt[:,1], inv_pt[:,1]) + torch.mul(inv_pt[:,2], inv_pt[:,2]))\n dist_min = torch.min(dist_pt)\n dist_max = torch.max(dist_pt)\n return dist_min, dist_max\n\n'''\n# Former Definition\n\ndef inv_pose_vec(vec, transform_mat, pt):\n # pt: B * 8 * 3 (X, Y, Z)\n # vec: B * 6 (Rx, Ry, Rz, Tx, Ty, Tz)\n pt = pt - vec[:, 3:].unsqueeze(1).repeat(1,8,1)\n rot_Mat = transform_mat[:, :3, :3]\n inv_rotMat = torch.inverse(rot_Mat)\n inv_pt = pt.bmm(inv_rotMat)\n\n return inv_pt\n\ndef raydist_range(vec, transform_mat, pt, src):\n inv_pt = inv_pose_vec(vec, transform_mat, pt)\n #inv_pt = inv_pt.squeeze(0)\n inv_pt[:,:,2] = src - inv_pt[:,:,2]\n inv_pt = inv_pt.view(-1, 3)\n dist_pt = torch.sqrt(torch.mul(inv_pt[:,0], inv_pt[:,0]) + torch.mul(inv_pt[:,1], inv_pt[:,1]) + torch.mul(inv_pt[:,2], inv_pt[:,2]))\n dist_min = torch.min(dist_pt)\n dist_max = torch.max(dist_pt)\n return dist_min, dist_max\n'''\n","repo_name":"gaocong13/Projective-Spatial-Transformers","sub_path":"src/posevec2mat.py","file_name":"posevec2mat.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"81"} +{"seq_id":"72452463625","text":"\n# solving Laplace equation\n\ndef Laplace(i, j, func, delta_x, delta_y):\n b = delta_x / delta_y\n b_sqr = b*b\n F = func\n return (F(i+1,j) + F(i-1,j) + b_sqr * 
(F(i,j+1) + F(i,j-1))) / (2. * (1 + b_sqr))\n\n# lets say I have boundary conditions now\n\ndef LaplaceEqStep(i, j, func):\n F = func\n return (F(i + 1, j) + F(i - 1, j) + F(i, j + 1) + F(i, j - 1)) / 4.\n\ndef LaplaceEqStep_Compass(i, j, F_E, F_W, F_N, F_S):\n return (F_E(i+1, j) + F_W(i-1, j) + F_N(i, j+1) + F_S(i, j-1)) / 4.\n\n\nimport numpy as np\n\n# N-1 by j and M-2 by i, so the matrix is (M-1)*(N-1)\n\nM = 5\nN = 4\n\n# A u = d\n\n\ndef BoundarySolution(i, j):\n if j == 3: return 8.9\n if j == 0: return [6.1, 6.8, 7.7, 8.7, 9.8][i]\n if i == 0: return [6.1, 7.2, 8.4, 8.9][j]\n if i == 4: return [9.8, 9.4, 9.2, 8.9][j]\n\n return None\n\n\ndef Func_d(i, j):\n r = BoundarySolution(i, j)\n if r is None: return 0\n return -r\n\ndef Init_d():\n d = np.zeros((M - 2, N - 2))\n for i in range(1, M-1):\n for j in range(1, N-1):\n d[i-1, j-1] = LaplaceEqStep(i, j, Func_d)\n\n return np.reshape(d, ((M-2)*(N-2), 1))\n\ndef Func_A(i_adj, j_adj):\n # initializing a row\n a = np.zeros((M - 2, N - 2))\n a[i_adj, j_adj] = -1.\n if i_adj > 0: a[i_adj-1, j_adj] = 1./4\n if i_adj < M-2-1: a[i_adj+1, j_adj] = 1./4\n if j_adj > 0: a[i_adj, j_adj-1] = 1./4\n if j_adj < N-2-1: a[i_adj, j_adj+1] = 1./4.\n\n return np.reshape(a, (1, (M-2)*(N-2)))\n\ndef Init_A():\n A = np.zeros(((M - 2)*(N - 2), (M - 2)*(N - 2)))\n for i_adj in range(0, M-2):\n for j_adj in range(0, N-2):\n A[i_adj*(N-2) + j_adj] = Func_A(i_adj, j_adj)\n\n return A\n\nA = Init_A()\nd = Init_d()\n\nprint(A)\nprint(d)\n\nprint(np.linalg.solve(A, d))\n\n\ndef JacobiIteration(tolerance):\n u_0 = np.zeros((M-2, N-2))\n\n def func(i, j):\n r = BoundarySolution(i, j)\n if r is None: return u_0[i-1, j-1]\n\n return r\n\n u_1 = u_0.copy()\n\n iterations = 0\n while True:\n iterations += 1\n\n it = np.nditer(u_1, flags=['multi_index'])\n while not it.finished:\n i, j = it.multi_index\n u_1[i, j] = LaplaceEqStep(i+1, j+1, func)\n\n it.iternext()\n\n if np.linalg.norm((u_1 - u_0).flatten(), np.inf) < tolerance:\n break\n u_1, u_0 = u_0, u_1\n\n u_1 = np.transpose(u_1)\n return np.reshape(u_1, ((M-2)*(N-2), 1)), iterations\n\nprint(JacobiIteration(1e-3))\n\n\ndef GaussSeidelIteration(tolerance):\n u_0 = np.zeros((M-2, N-2))\n\n def func_0(i, j):\n r = BoundarySolution(i, j)\n if r is None: return u_0[i-1, j-1]\n\n return r\n\n def func_1(i, j):\n r = BoundarySolution(i, j)\n if r is None: return u_1[i-1, j-1]\n\n return r\n\n\n u_1 = u_0.copy()\n\n iterations = 0\n while True:\n iterations += 1\n\n it = np.nditer(u_1, flags=['multi_index'])\n while not it.finished:\n i, j = it.multi_index\n u_1[i, j] = LaplaceEqStep_Compass(i+1, j+1, func_0, func_1, func_0, func_1)\n\n it.iternext()\n\n if np.linalg.norm((u_1 - u_0).flatten(), np.inf) < tolerance:\n break\n u_1, u_0 = u_0, u_1\n\n u_1 = np.transpose(u_1)\n return np.reshape(u_1, ((M-2)*(N-2), 1)), iterations\n\nprint(GaussSeidelIteration(1e-3))","repo_name":"antoshkaplus/Py_Modules","sub_path":"FD/fd.py","file_name":"fd.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9045600530","text":"import torch\n\n\ndef k_means(X, k, max_iters=50, tol=1e-9, device=None):\n \"\"\"Do standard k-means clustering.\"\"\"\n n, d = X.shape\n\n x_min = torch.min(X, dim=0)[0]\n x_max = torch.max(X, dim=0)[0]\n\n resp = torch.zeros(n, k, dtype=torch.bool, device=device)\n idx = torch.arange(n)\n\n centroids = torch.rand(\n k, d, device=device\n ) * (x_max - x_min) + x_min\n\n prev_distance = torch.tensor(float('inf'), 
device=device)\n\n for i in range(max_iters):\n distances = (X[:, None, :] - centroids[None, :, :]).norm(dim=2)\n labels = distances.min(dim=1)[1]\n for j in range(k):\n centroids[j, :] = X[labels == j, :].mean(dim=0)\n resp[:] = False\n resp[idx, labels] = True\n total_distance = distances[resp].sum()\n\n if torch.abs(total_distance - prev_distance) < tol:\n break\n prev_distance = total_distance\n\n return resp.float(), centroids\n\n\ndef minibatch_k_means(loader, k, max_iters=50, tol=1e-3, device=None):\n \"\"\"\n Do minibatch version of k-means\n\n Based on https://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf\n \"\"\"\n centroids = next(iter(loader))[0][:k].to(device)\n counts = torch.ones(k, device=device)\n\n prev_norm = torch.tensor(0.0, device=device)\n\n print('Stating minibatch_k_means')\n for j in range(max_iters):\n print('Iter: {}'.format(j))\n for d in loader:\n X = d[0].to(device)\n diffs = X[:, None, :] - centroids[None, :, :]\n labels = diffs.norm(dim=2).min(dim=1)[1]\n\n counts += torch.bincount(labels, minlength=k).float()\n eta = 1 / counts\n\n mask = torch.zeros_like(diffs)\n mask[torch.arange(mask.shape[0]), labels, :] = 1\n\n centroids += (\n eta[:, None] * (mask * diffs)\n ).sum(dim=0)\n\n\n norm = torch.norm(centroids, dim=0).sum()\n\n if torch.abs(norm - prev_norm) < tol:\n print('Converged')\n return counts, centroids\n prev_norm = norm\n\n\n print('Finished minibatch_k_means')\n return counts, centroids\n\n","repo_name":"bayesiains/density-deconvolution","sub_path":"deconv/gmm/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"41558449621","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\nfrom tkinter import ttk\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport csv\r\n\r\ndef display_data(df):\r\n # Clear any existing rows from the Treeview widget\r\n treeview.delete(*treeview.get_children())\r\n\r\n # Display the data in the Treeview widget\r\n for _, row in df.iterrows():\r\n treeview.insert(\"\", \"end\", values=(row[\"Sneaker Name\"], row[\"Price\"], row[\"Attributes\"], row[\"Published Date\"]))\r\n\r\ndef scrape_data():\r\n url = entry_url.get()\r\n\r\n try:\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'\r\n }\r\n response = requests.get(url, headers=headers)\r\n response.raise_for_status()\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n\r\n # Web scraping code to extract data from the website using Beautiful Soup\r\n data = []\r\n for product_item in soup.find_all('div', class_='tiles_container m--t--1'):\r\n # Find the product name inside the product item\r\n product_name_tag = product_item.find('a', class_='tile__title tc--b')\r\n product_name = product_name_tag.text.strip() if product_name_tag else \"N/A\"\r\n\r\n # Find the product price inside the product item\r\n product_price_tag = product_item.find('span', class_='p--t--1 fw--bold')\r\n product_price = product_price_tag.text.strip() if product_price_tag else \"N/A\"\r\n\r\n # Find the product attributes (size and brand) inside the product item\r\n product_attributes = product_item.find('div', class_='d--fl m--t--1').text.strip() if product_item.find('div', class_='d--fl m--t--1') else \"N/A\"\r\n\r\n # Find the product published date inside the product item\r\n product_published_date_tag = 
product_item.find('div', class_='timestamp')\r\n product_published_date = product_published_date_tag.text.strip() if product_published_date_tag else \"N/A\"\r\n\r\n # Add the extracted data to the list\r\n data.append({\r\n 'Sneaker Name': product_name,\r\n 'Price': product_price,\r\n 'Attributes': product_attributes,\r\n 'Published Date': product_published_date\r\n })\r\n\r\n # Convert the data to a Pandas DataFrame\r\n df = pd.DataFrame(data)\r\n\r\n # Display the data in the table\r\n display_data(df)\r\n\r\n # Save the data to a CSV file (append to existing file)\r\n output_file = \"sneakers_data.csv\"\r\n with open(output_file, mode='a', newline='', encoding='utf-8') as file:\r\n writer = csv.DictWriter(file, fieldnames=[\"Sneaker Name\", \"Price\", \"Attributes\", \"Published Date\"])\r\n if file.tell() == 0: # Check if the file is empty (write header only once)\r\n writer.writeheader()\r\n writer.writerows(data)\r\n\r\n messagebox.showinfo(\"Success\", \"Data has been scraped and exported to CSV successfully!\")\r\n\r\n except requests.exceptions.RequestException as e:\r\n messagebox.showerror(\"Error\", f\"Error fetching data: {e}\")\r\n except Exception as e:\r\n messagebox.showerror(\"Error\", f\"An error occurred: {e}\")\r\n\r\nif __name__ == \"__main__\":\r\n # Create the main application window\r\n app = tk.Tk()\r\n app.title(\"Sneakers Fashion Website Scraper\")\r\n\r\n # Create GUI elements\r\n label_url = tk.Label(app, text=\"Enter website URL:\")\r\n label_url.pack()\r\n\r\n entry_url = tk.Entry(app, width=50)\r\n entry_url.pack()\r\n\r\n btn_scrape = tk.Button(app, text=\"Scrape Data\", command=scrape_data)\r\n btn_scrape.pack()\r\n\r\n # Create Treeview widget to display the data\r\n treeview = ttk.Treeview(app, columns=[\"Sneaker Name\", \"Price\", \"Attributes\", \"Published Date\"], show=\"headings\")\r\n\r\n # Define column headings\r\n treeview.heading(\"Sneaker Name\", text=\"Sneaker Name\")\r\n treeview.heading(\"Price\", text=\"Price\")\r\n treeview.heading(\"Attributes\", text=\"Attributes\")\r\n treeview.heading(\"Published Date\", text=\"Published Date\")\r\n\r\n # Set column widths\r\n treeview.column(\"Sneaker Name\", width=200)\r\n treeview.column(\"Price\", width=100)\r\n treeview.column(\"Attributes\", width=200)\r\n treeview.column(\"Published Date\", width=150)\r\n\r\n treeview.pack()\r\n\r\n # Start the main loop\r\n app.mainloop()\r\n","repo_name":"AmirSayab/SwagKicksHackathon","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34579917204","text":"from Functionalities import Execute\r\n\r\nGeometry_names = [\"Horizontal_Parabola\",\r\n \"Semiesfera\",\r\n \"Cono\",\r\n \"F100\"]\r\n\r\nFunctionalities_names = [\"Evolución de los Coeficientes con el Mach\",\r\n \"Cáculo y representación 3-D\",\r\n \"Convergencia con el número de elementos\"]\r\n\r\n#################################\r\nnx = 200; nphi = 200\r\ngeometry_name = Geometry_names[1]\r\n\r\nMach = 5; gamma = 1.4\r\nalpha = 0; beta = 0\r\n\r\napplication = Functionalities_names[1]\r\n#################################\r\n\r\nExecute(geometry_name, application, nx, nphi, alpha, beta, Mach, gamma)","repo_name":"Aerospace-Curiosity/Hito_7","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10505641248","text":"import 
numpy as np\r\nfrom LUgauss import LUgauss\r\ndef LUsolver(lu, b): # lu = b, where lu is a matrix consisting of both\r\n n = len(lu) # lower and upper entries stored in 1 matrix.\r\n for i in range(1,n):\r\n for j in range(0,i):\r\n b[i] -= lu[i][j] * b[j]\r\n\r\n x = np.zeros(n)\r\n x[n-1] = b[n-1] / lu[n-1][n-1]\r\n for i in range(n-2,-1,-1):\r\n x[i] = b[i]\r\n for j in range(i+1,n):\r\n x[i] -= lu[i][j] * x[j]\r\n x[i] /= lu[i][i]\r\n x[i] = round(x[i],7)\r\n print(x)\r\n return(x)\r\n\r\n\r\nAtest = np.array([[1,2,0],[2,3,5],[4,1,1]],dtype=float)\r\nbtest = np.array([5,23,9],dtype=float)\r\nlutest = LUgauss(Atest) #This function converts matrix Atest into LU form\r\nLUsolver(lutest,btest) #This function prints out the values of x, i.e. = [1,2,3]\r\n","repo_name":"gerrylwk/Efficient-Matrix-Algorithms","sub_path":"LUsolver.py","file_name":"LUsolver.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7199095845","text":"from rest_framework import serializers\n\nfrom .models import Item, ItemAdjustment\nfrom item_groups.serializers import ItemGroupSerializer\nfrom item_definitions.serializers import ItemSpecificationSerializer\n\n\nclass ItemAdjustmentSerializer(serializers.ModelSerializer):\n class Meta:\n model = ItemAdjustment\n fields = \"__all__\"\n\n\nclass ItemSerializer(serializers.ModelSerializer):\n adjustments = serializers.SerializerMethodField()\n item_group = ItemGroupSerializer(many=True, read_only=True)\n specification = ItemSpecificationSerializer(read_only=True)\n\n def get_adjustments(self, item):\n adjustments = ItemAdjustment.objects.filter(\n definition=item.specification.definition,\n user_group=item.location.user_group,\n )\n return ItemAdjustmentSerializer(list(adjustments), many=True).data\n\n class Meta:\n model = Item\n fields = \"__all__\"\n\n\nclass AdjustItemSerializer(serializers.Serializer):\n change = serializers.DecimalField(max_digits=7, decimal_places=2)\n price = serializers.DecimalField(required=False, max_digits=7, decimal_places=2)\n note = serializers.CharField(required=False, max_length=255)\n","repo_name":"ammitchky/GroceryManager","sub_path":"src/GroceryManager/items/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41682114300","text":"import time as t\nimport math\nimport numpy as np\nimport copy as cp\nimport cv2\nimport random\n\nclass Time:\n # Measuring time\n def __init__(self):\n self.start_time = t.time()\n\n def elapsed(self):\n return t.time() - self.start_time\n\ndef drawKeypoints(frame, kps, BGR, size):\n for i in range(len(kps)):\n for j in range(len(kps[i])):\n x = math.floor(kps[i, j, 0])\n y = math.floor(kps[i, j, 1])\n\n frame = cv2.circle(frame, (x, y), size, BGR, -1)\n\n return frame\n\n#\n# kps -> Matrix with Frames x Keypoints\n# path -> Input video path\n# out_path -> Output video path\n# BGR -> (B, G, R) colors\n#\ndef videoFlow(kps, path, out_path, BGR):\n # Transform into numpy array\n kps = np.array(kps)\n\n # Open video and get settings\n video = cv2.VideoCapture(path)\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = video.get(cv2.CAP_PROP_FPS)\n\n # Open output video\n output_video = cv2.VideoWriter(out_path, 
fourcc, fps, (width, height))\n\n # Draw new images\n for i in range(length):\n # Get frame\n ret, frame = video.read()\n\n # Draw keypoints\n frame = drawKeypoints(frame, kps[0:i,:,:], BGR, 1)\n\n # Write new image to video\n output_video.write(frame)\n\n # Close videos\n video.release()\n output_video.release()\n\ndef videoFlow2(kps1, kps2, path, out_path, BGR1, BGR2):\n # Transform into numpy array\n kps1 = np.array(kps1)\n kps2 = np.array(kps2)\n\n # Open video and get settings\n video = cv2.VideoCapture(path)\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = video.get(cv2.CAP_PROP_FPS)\n\n # Open output video\n output_video = cv2.VideoWriter(out_path, fourcc, fps, (2*width, height))\n\n # Draw new images\n for i in range(length):\n # Get frame\n ret, frame = video.read()\n\n # Draw keypoints 1\n frame1 = drawKeypoints(cp.copy(frame), kps1[0:i,:,:], BGR1, 1)\n\n # Draw keypoints 2\n frame2 = drawKeypoints(cp.copy(frame), kps2[0:i,:,:], BGR2, 1)\n\n out = np.concatenate((frame1, frame2), axis=1)\n\n # Write new image to video\n output_video.write(out)\n\n # Close videos\n video.release()\n output_video.release()","repo_name":"Brenolleite/Computer_Vision-MO446","sub_path":"p3-XX-YY/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6350212468","text":"import torch\r\n\r\nimport numpy as np\r\n\r\n# class NeRFmodel(torch.nn.Module):\r\n# def __init__(self,lx,ld,W):\r\n# super().__init__()\r\n# self.MLPinput=torch.nn.Linear(lx+ld+3,W)\r\n# self.MLPmid0=torch.nn.Sequential(*self.MLPstd(4,256))\r\n# self.Denseout=torch.nn.Linear(W+lx+ld+3,W)\r\n# self.MLPmid1=torch.nn.Sequential(*self.MLPstd(2,256)) \r\n# self.Modelout=torch.nn.Linear(W,4)\r\n# self.relu=torch.nn.ReLU()\r\n# def MLPstd(self,numsLayer,width):\r\n# mlpstd=[]\r\n# for i in range(numsLayer,):\r\n# mlpstd.append(torch.nn.Linear(width,width))\r\n# mlpstd.append(torch.nn.ReLU())\r\n# return mlpstd\r\n# def forward(self,X):\r\n# x0=self.relu(self.MLPinput(X))\r\n# x0=self.MLPmid0(x0)\r\n# x0=self.relu(self.Denseout(torch.concat([x0,X],-1)))\r\n# x0=self.MLPmid1(x0)\r\n# output=self.Modelout(x0)\r\n# return output\r\nclass NeRFmodel(torch.nn.Module):\r\n def __init__(self,lx=36,ld=36,W=256, **kwargs):\r\n super().__init__()\r\n self.MLPinput=torch.nn.Linear(lx+3,W)\r\n self.MLPmid0=torch.nn.Sequential(*self.MLPstd(4,256))\r\n self.Denseout=torch.nn.Linear(W+lx+3,W)\r\n self.MLPmid1=torch.nn.Sequential(*self.MLPstd(2,256)) \r\n self.relu=torch.nn.ReLU()\r\n\r\n self.feature_linear = torch.nn.Linear(W, W)\r\n self.view_linear=torch.nn.Linear(ld+ 3 + W, W//2)\r\n self.alpha_linear = torch.nn.Linear(W, 1)\r\n self.rgb_output = torch.nn.Linear(W//2, 3)\r\n self.softplus = torch.nn.Softplus(beta=100)\r\n \r\n\r\n\r\n def MLPstd(self,numsLayer,width):\r\n mlpstd=[]\r\n for i in range(numsLayer,):\r\n mlpstd.append(torch.nn.Linear(width,width))\r\n mlpstd.append(torch.nn.ReLU())\r\n return mlpstd\r\n\r\n\r\n def forward(self,embpoints,embdir=None,if_occ=False,**kwargs):\r\n x0=self.softplus(self.MLPinput(embpoints))\r\n x0=self.MLPmid0(x0)\r\n x0=self.softplus(self.Denseout(torch.concat([x0,embpoints],-1)))\r\n x0=self.MLPmid1(x0)\r\n\r\n dense=self.alpha_linear(x0)\r\n if if_occ:\r\n return dense\r\n x0=self.feature_linear(x0)\r\n 
x0=self.relu(self.view_linear(torch.concat((x0,embdir),dim=-1)))\r\n x0=self.rgb_output(x0)\r\n\r\n return torch.concat((x0,dense),dim=-1)\r\n \r\nclass NeRFtinymodel(torch.nn.Module):\r\n def __init__(self,W=64,ld=36,n_features_per_level=2,n_levels=16,num_hiddense=1,num_hidcolor=2,num_hidcolordim=15,**kwargs):\r\n super().__init__()\r\n self.inputsize=n_features_per_level*n_levels\r\n \r\n self.MLPinput=torch.nn.Linear(self.inputsize,W)\r\n self.MLPmid0=torch.nn.Sequential(*self.MLPstd(num_hiddense,W))\r\n self.Denseout=torch.nn.Linear(W,1+num_hidcolordim)\r\n self.colorlayerin=torch.nn.Linear(num_hidcolordim+ld+3,W)\r\n self.MLPmid1=torch.nn.Sequential(*self.MLPstd(num_hidcolor,W)) \r\n self.colorout=torch.nn.Linear(W,3)\r\n self.relu=torch.nn.ReLU()\r\n def MLPstd(self,numsLayer,width):\r\n mlpstd=[]\r\n for i in range(numsLayer,):\r\n mlpstd.append(torch.nn.Linear(width,width))\r\n mlpstd.append(torch.nn.ReLU())\r\n return mlpstd\r\n def forward(self,emb_points,enc_dir,if_occ=False, **kwargs):\r\n x0=self.relu(self.MLPinput(emb_points))\r\n x0=self.MLPmid0(x0)\r\n x0=self.Denseout(x0)\r\n\r\n dense=x0[...,:1]\r\n if if_occ:\r\n return dense\r\n h=x0[...,1:]\r\n\r\n h=self.relu(self.colorlayerin(torch.concat([h,enc_dir],-1)))\r\n h=self.MLPmid1(h)\r\n color=self.colorout(h)\r\n return torch.concat((color,dense),dim=-1)\r\n\r\nclass NeRFoccmodel(torch.nn.Module):\r\n def __init__(self,lx=36,ld=36,W=256, **kwargs):\r\n super().__init__()\r\n self.MLPinput=torch.nn.Linear(lx+3,W)\r\n self.MLPmid0=torch.nn.Sequential(*self.MLPstd(4,256))\r\n self.Denseout=torch.nn.Linear(W+lx+3,W)\r\n self.MLPmid1=torch.nn.Sequential(*self.MLPstd(2,256)) \r\n self.relu=torch.nn.ReLU()\r\n\r\n self.feature_linear = torch.nn.Linear(W, W)\r\n self.view_linear=torch.nn.Linear(ld+ 3 + W, W//2)\r\n self.alpha_linear = torch.nn.Linear(W, 1)\r\n self.rgb_output = torch.nn.Linear(W//2, 3)\r\n\r\n\r\n def MLPstd(self,numsLayer,width):\r\n mlpstd=[]\r\n for i in range(numsLayer,):\r\n mlpstd.append(torch.nn.Linear(width,width))\r\n mlpstd.append(torch.nn.ReLU())\r\n return mlpstd\r\n\r\n\r\n def forward(self,embpoints,embdir,if_occ=False,**kwargs):\r\n x0=self.relu(self.MLPinput(embpoints))\r\n x0=self.MLPmid0(x0)\r\n x0=self.relu(self.Denseout(torch.concat([x0,embpoints],-1)))\r\n x0=self.MLPmid1(x0)\r\n\r\n dense=self.alpha_linear(x0)\r\n if if_occ:\r\n return dense\r\n x0=self.feature_linear(x0)\r\n x0=self.relu(self.view_linear(torch.concat((x0,embdir),dim=-1)))\r\n x0=self.rgb_output(x0)\r\n\r\n return torch.concat((x0,dense),dim=-1)\r\n\r\nclass PositionalEncoding(object):\r\n def __init__(self, L=10):\r\n self.L = L\r\n def __call__(self, p):\r\n pi = 1.0\r\n p_transformed = torch.cat([torch.cat(\r\n [torch.sin((2 ** i) * pi * p), \r\n torch.cos((2 ** i) * pi * p)],\r\n dim=-1) for i in range(self.L)], dim=-1)\r\n return torch.cat([p, p_transformed], dim=-1) \r\nclass NeuralNetwork(torch.nn.Module):\r\n ''' Network class containing occupanvy and appearance field\r\n \r\n Args:\r\n cfg (dict): network configs\r\n '''\r\n\r\n def __init__(self, occ_freq=10,view_freq=4,num_layer=8,**kwargs):\r\n super().__init__()\r\n self.octaves_pe=occ_freq\r\n self.octaves_pe_views=view_freq\r\n self.transform_points=PositionalEncoding(L=self.octaves_pe)\r\n self.transform_points_view = PositionalEncoding(L=self.octaves_pe_views)\r\n self.rescale=1\r\n self.num_layers=num_layer\r\n\r\n\r\n\r\n self.feat_size =256\r\n self.skips=[4]\r\n\r\n dim = 3\r\n hidden_size=256\r\n self.alfa=-10 \r\n dim_embed = dim*self.octaves_pe*2 + 
dim\r\n\r\n dim_embed_view = dim + dim*self.octaves_pe_views*2 + dim + dim + self.feat_size \r\n\r\n dims_view = [dim_embed_view]+ [ hidden_size for i in range(0, 4)] + [3]\r\n\r\n self.num_layers_app = len(dims_view)\r\n\r\n self.relu = torch.nn.ReLU()\r\n self.tanh = torch.nn.Tanh()\r\n self.sigmoid = torch.nn.Sigmoid()\r\n self.softplus = torch.nn.Softplus(beta=100)\r\n\r\n ### geo network\r\n dims_geo = [dim_embed]+ [ hidden_size if i in self.skips else hidden_size for i in range(0, self.num_layers)] + [self.feat_size+1] \r\n self.num_layers = len(dims_geo)\r\n for l in range(0, self.num_layers - 1):\r\n if l + 1 in self.skips:\r\n out_dim = dims_geo[l + 1] - dims_geo[0]\r\n else:\r\n out_dim = dims_geo[l + 1]\r\n\r\n lin = torch.nn.Linear(dims_geo[l], out_dim)\r\n\r\n if True:\r\n if l == self.num_layers - 2:\r\n torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims_geo[l]), std=0.0001)\r\n torch.nn.init.constant_(lin.bias, -0.6)\r\n elif self.octaves_pe > 0 and l == 0:\r\n torch.nn.init.constant_(lin.bias, 0.0)\r\n torch.nn.init.constant_(lin.weight[:, 3:], 0.0)\r\n torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))\r\n elif self.octaves_pe > 0 and l in self.skips:\r\n torch.nn.init.constant_(lin.bias, 0.0)\r\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\r\n torch.nn.init.constant_(lin.weight[:, -(dims_geo[0] - 3):], 0.0)\r\n else:\r\n torch.nn.init.constant_(lin.bias, 0.0)\r\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\r\n\r\n \r\n lin = torch.nn.utils.weight_norm(lin)\r\n\r\n setattr(self, \"lin\" + str(l), lin)\r\n\r\n self.softplus = torch.nn.Softplus(beta=100)\r\n\r\n ## appearance network\r\n dims_view = [dim_embed_view]+ [ hidden_size for i in range(0, 4)] + [3]\r\n\r\n self.num_layers_app = len(dims_view)\r\n\r\n for l in range(0, self.num_layers_app - 1):\r\n out_dim = dims_view[l + 1]\r\n lina = torch.nn.Linear(dims_view[l], out_dim)\r\n lina = torch.nn.utils.weight_norm(lina)\r\n setattr(self, \"lina\" + str(l), lina)\r\n\r\n def infer_occ(self, p):\r\n pe = self.transform_points(p/self.rescale)\r\n x = pe\r\n for l in range(0, self.num_layers - 1):\r\n lin = getattr(self, \"lin\" + str(l))\r\n if l in self.skips:\r\n x = torch.cat([x, pe], -1) / np.sqrt(2)\r\n x = lin(x)\r\n if l < self.num_layers - 2:\r\n x = self.softplus(x) \r\n return x\r\n \r\n def infer_app(self, points, normals, view_dirs, feature_vectors):\r\n rendering_input = torch.cat([points, view_dirs, normals.squeeze(-2), feature_vectors], dim=-1)\r\n x = rendering_input\r\n for l in range(0, self.num_layers_app - 1):\r\n lina = getattr(self, \"lina\" + str(l))\r\n x = lina(x)\r\n if l < self.num_layers_app - 2:\r\n x = self.relu(x)\r\n x = self.tanh(x) * 0.5 + 0.5\r\n return x\r\n\r\n def gradient(self, p, tflag=True):\r\n with torch.enable_grad():\r\n p.requires_grad_(True)\r\n y = self.infer_occ(p)[...,:1]\r\n d_output = torch.ones_like(y, requires_grad=False, device=y.device)\r\n gradients = torch.autograd.grad(\r\n outputs=y,\r\n inputs=p,\r\n grad_outputs=d_output,\r\n create_graph=tflag,\r\n retain_graph=tflag,\r\n only_inputs=True, allow_unused=tflag)[0]\r\n return gradients.unsqueeze(1)\r\n def set_alfa(self,alfa):\r\n self.alfa=alfa\r\n return 0\r\n\r\n\r\n def forward(self, p, ray_d=None, only_occupancy=False, return_logits=False,return_addocc=False, noise=False, **kwargs):\r\n # print(p.shape)\r\n x = self.infer_occ(p)\r\n # print(x.shape)\r\n if only_occupancy:\r\n # return self.sigmoid(x[...,:1] * 
-10.0)\r\n return self.sigmoid(x[...,:1] * self.alfa)\r\n elif ray_d is not None:\r\n \r\n input_views = ray_d / torch.norm(ray_d, dim=-1, keepdim=True)\r\n input_views = self.transform_points_view(input_views)\r\n normals = self.gradient(p)\r\n #normals = n / (torch.norm(n, dim=-1, keepdim=True)+1e-6)\r\n # print(x.shape)\r\n # print(p.shape)\r\n # print(normals.shape)\r\n # print(input_views.shape)\r\n rgb = self.infer_app(p, normals, input_views, x[...,1:])\r\n if return_addocc:\r\n return rgb, self.sigmoid(x[...,:1] * self.alfa )\r\n else:\r\n return rgb\r\n elif return_logits:\r\n return -1*x[...,:1]\r\n","repo_name":"pulangk97/nerfPytorch","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11190,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"22877900727","text":"import random\n\nclass HangMan:\n \"\"\"\n This is the docstring for the HangMan class.\n\n Hangman class is a class the initialises a hangman game.\n\n Attributes:\n word_list (list[str]): a list of words for the game.\n num_lives (int): number of lives for the player of the game. Default is 5.\n word (str): a random word for the game from the word_list.\n word_guessed (list[str]): list of letters of guessed word by the player. Initially a list of empty chars the length of the word.\n num_letters (int): number of unique characters in word\n list_of_guesses (list[char]): list that holds the guesses of characters a player makes. Initially is [].\n \"\"\"\n def __init__(self, word_list, num_lives=5):\n \"\"\"\n Constructor for the HangMan class.\n\n Parameters:\n word_list (list[str]): a list of words for the instance of the game.\n num_lives (int): number of lives a player gets for the instance of the game. Default is 5.\n \"\"\"\n self.word = random.choice(word_list)\n self.word_guessed = ['' for _ in self.word]\n self.num_letters = len(set(self.word))\n self.word_list = word_list\n self.num_lives = num_lives\n self.list_of_guesses = []\n\n def check_guess(self, guess):\n \"\"\"\n This method checks if the guess is in the word\n\n Args:\n guess (char): a letter guess.\n \"\"\"\n guess = guess.lower()\n self.list_of_guesses.append(guess)\n if guess in self.word:\n print(f\"Good guess! '{guess}' is in the word.\")\n first_idx = -1 # int to start searching for the index of guess in word\n for letter in self.word:\n if letter == guess:\n idx = self.word.index(guess, first_idx + 1) # check for the index of guess in word\n self.word_guessed[idx] = guess\n first_idx = idx\n self.num_letters -= 1\n\n else:\n self.num_lives -= 1\n print(f\"Sorry, '{guess}' is not in the word.\")\n print(f\"You have {self.num_lives} lives left.\")\n\n \n def ask_for_input(self):\n \"\"\"\n This method asks for input from the player and checks guesses.\n \"\"\"\n guess = input(\"Please guess a letter: \")\n if len(guess) > 1 or not guess.isalpha():\n print(\"Invalid letter. 
Please, enter a single alphabetic character.\")\n        elif guess in self.list_of_guesses:\n            print(\"You already tried that letter!\")\n        else:\n            self.check_guess(guess)\n\ndef play_game(word_list):\n    \"\"\"\n    This function initialises the hangman game and starts the play loop\n\n    Args:\n        word_list (list[str]): The list of words for the game\n    \"\"\"\n    num_lives = 5\n    game = HangMan(word_list, num_lives)\n    while True:\n        if game.num_lives == 0:\n            print(\"You lost!\")\n            break\n        elif game.num_letters > 0:\n            game.ask_for_input()\n        elif game.num_lives != 0 and game.num_letters == 0:\n            print(\"Congratulations. You won the game!\")\n            break\n\nif __name__ == \"__main__\":\n    play_game([\"apple\", \"grape\"])\n","repo_name":"prg48/hangman","sub_path":"milestone_5.py","file_name":"milestone_5.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20258542909","text":"import requests\nimport json\nfrom requests.auth import HTTPBasicAuth\nfrom django.conf import settings\n\n\ndef get_access_token():\n    client_id = settings.SAFARICOM_AUTH_KEY\n    client_secret = settings.SAFARICOM_AUTH_CONSUMER_SECRET\n    token_endpoint = settings.SAFARICOM_AUTH_ENDPOINT\n    params = {'grant_type': 'client_credentials'}\n    try:\n        res = requests.get(token_endpoint,\n                           auth=HTTPBasicAuth(client_id, client_secret), params=params, verify=True)\n        response = json.loads(res.text)\n        access_token = response['access_token']\n        return access_token\n    except KeyError as key:\n        return key\n    except Exception as error:\n        print('TOKEN ERROR:', str(error))\n        return error\n    ","repo_name":"KibokoDao-Africa/offrampsdk","sub_path":"utils/api_auth.py","file_name":"api_auth.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17594877833","text":"from django.db import models\nfrom django.conf import settings\nfrom products.models import ProductModel\nfrom django.db.models.signals import pre_save, post_save, m2m_changed\n# Create your models here.\n\n\nUser = settings.AUTH_USER_MODEL\n\n\nclass CartManager(models.Manager):\n    def new_get(self, request):\n        cart_id = request.session.get('cart_id', None)\n        queryset = self.get_queryset().filter(id=cart_id)\n        if queryset.count() == 1:\n            new_object = False\n            cart_object = queryset.first()\n            if request.user.is_authenticated and cart_object.user is None:\n                cart_object.user = request.user\n                cart_object.save()\n        else:\n            cart_object = Cart.objects.new_cart(user=request.user)\n            new_object = True\n            request.session['cart_id'] = cart_object.id\n        return cart_object, new_object\n\n    def new_cart(self, user=None):\n        user_object = None\n        if user is not None:\n            if user.is_authenticated:\n                user_object = user\n        return self.model.objects.create(user=user_object)\n\n\nclass Cart(models.Model):\n    user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)\n    product = models.ManyToManyField(ProductModel, blank=True)\n    subtotal = models.DecimalField(default=0.00, max_digits=100000, decimal_places=2)\n    total = models.DecimalField(default=0.00, max_digits=100000, decimal_places=2)\n\n    objects = CartManager()\n\n    def __str__(self):\n        return str(self.id)\n\n\ndef m2m_save_cart_r(instance, action, **kwargs):\n    if action == 'post_add' or action == 'post_remove' or action == 'post_clear':\n        product = instance.product.all()\n        total = 0\n        for prod in product:\n            total += prod.price\n        if 
instance.subtotal != total:\n instance.subtotal = total\n instance.save()\n\n\nm2m_changed.connect(m2m_save_cart_r, sender=Cart.product.through)\n\n\ndef pre_save_cart(instance, **kwargs):\n if instance.subtotal > 0:\n instance.total = instance.subtotal\n else:\n instance.total = 0.00\n\n\npre_save.connect(pre_save_cart, sender=Cart)\n","repo_name":"ISHARRR/ecomm","sub_path":"carts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72407707466","text":"import uuid\nfrom datetime import datetime, date\nfrom re import escape\n\nimport stringcase\nfrom bson import Regex, ObjectId\n\nfrom app.server.db.collections import flow_collection\nfrom app.server.db.collections import question_collection as collection\nfrom app.server.db_utils.flows import add_flows_to_db_from_question\nfrom app.server.db_utils.helper import question_helper\nfrom app.server.models.current_user import CurrentUserSchema\nfrom app.server.models.flow import NewFlow\nfrom app.server.models.question import QuestionSchemaDb, QuestionIn\nfrom app.server.utils.common import form_query, RequestMethod\nfrom app.server.utils.timezone import make_timezone_aware, get_local_datetime_now\n\n\nasync def get_question_one(_id: str) -> QuestionSchemaDb:\n query = {\"_id\": ObjectId(_id)}\n async for question in collection.find(query):\n return QuestionSchemaDb(**question_helper(question))\n\n\nasync def get_questions_db(*, current_page: int, page_size: int, sorter: str = None, query: dict) -> list[\n QuestionSchemaDb]:\n # always show the newest first\n sort = [(\"_id\", -1)]\n if sorter:\n # [(\"answers\", 1), (\"bot_user_group\", 1)]\n for s in sorter.split(','):\n order = s[:1]\n key = s[1:]\n if order == '+':\n sort = [(stringcase.snakecase(key), 1)]\n else:\n sort = [(stringcase.snakecase(key), -1)]\n\n cursor = collection.find(query, sort=sort)\n cursor.skip((current_page - 1) * page_size).limit(page_size)\n questions = []\n async for question in cursor:\n questions.append(QuestionSchemaDb(**question_helper(question)))\n return questions\n\n\nasync def get_questions_count_db(*, query: dict) -> int:\n count = collection.count_documents(query)\n return await count\n\n\nasync def get_questions_and_count_db(*, current_page: int, page_size: int, sorter: str = None, question_text: str,\n language: str, topic: str,\n updated_at: list[date], triggered_counts: list[int]) -> (\n list[QuestionSchemaDb], int):\n if updated_at:\n updated_at_start, updated_at_end = updated_at\n db_key = [(\"topic\", Regex(f\".*{escape(topic)}.*\", \"i\") if topic else ...),\n (f\"text.{language}\", Regex(f\".*{escape(question_text)}.*\", \"i\") if question_text else ...),\n (f\"triggered_count\", {\"$gte\": triggered_counts[0],\n \"$lte\": triggered_counts[1]} if triggered_counts else ...),\n (\"is_active\", True),\n (\"updated_at\", {\"$gte\": make_timezone_aware(updated_at_start),\n \"$lte\": make_timezone_aware(updated_at_end)} if updated_at else ...)]\n query = form_query(db_key)\n\n questions = await get_questions_db(current_page=current_page, page_size=page_size, sorter=sorter, query=query)\n total = await get_questions_count_db(query=query)\n return questions, total\n\n\nasync def get_topics_db():\n query = {\"is_active\": True}\n topics = await collection.distinct('topic', query)\n return topics\n\n\nasync def add_question_db(question: QuestionIn, current_user: CurrentUserSchema) -> str:\n doc = await 
process_question(question, current_user, method=RequestMethod.ADD)\n result = await collection.insert_one(doc)\n return f\"Added {1 if result.acknowledged else 0} question.\"\n\n\nasync def process_question(question: QuestionIn, current_user: CurrentUserSchema, *, method: RequestMethod):\n variations = []\n if question.variations:\n for alt in question.variations.split('\\n'):\n if not alt:\n continue\n variation = {\n \"id\": str(uuid.uuid4()),\n \"text\": alt.strip(),\n \"language\": question.language,\n \"internal\": False\n }\n variations.append(variation)\n\n question_start = question_end = None\n if question.question_time:\n question_start = make_timezone_aware(datetime.strptime(question.question_time[0], '%Y-%m-%d'))\n question_end = make_timezone_aware(datetime.strptime(question.question_time[1], '%Y-%m-%d'))\n\n if question.response_type == 'text':\n # if add, always create unnamed flow. if edit, find if there's exact flow(match topic and text)\n query = {f'flow.data.text.{question.language}': question.text_response, 'is_active': True,\n \"name\": {\"$exists\": False}, \"topic\": question.topic}\n exist_response = await flow_collection.find_one(query)\n if exist_response:\n flow_id = exist_response['_id']\n else: # create unnamed response\n flow_doc = [{\"type\": \"message\", \"data\": {\"text\": {question.language: question.text_response}}}]\n flow = NewFlow(**{\"topic\": question.topic, \"type\": \"storyboard\", \"flow_items\": flow_doc})\n flow_id = await add_flows_to_db_from_question(flow, current_user)\n else: # question.response_type == 'flow'\n flow_id = question.flow_response\n doc = {\n \"created_at\": get_local_datetime_now(),\n \"created_by\": ObjectId(current_user.userId),\n \"updated_at\": get_local_datetime_now(),\n \"updated_by\": ObjectId(current_user.userId),\n \"text\": {question.language: question.main_question},\n \"internal\": False,\n \"keyword\": question.tags,\n \"answers\": [\n {\n \"id\": \"1\",\n \"flow\": {\"flow_id\": ObjectId(flow_id)},\n \"bot_user_group\": \"1\"\n }\n ],\n \"alternate_questions\": variations,\n \"topic\": question.topic,\n \"active_at\": question_start,\n \"expire_at\": question_end,\n \"is_active\": True\n }\n\n if method == RequestMethod.EDIT:\n keys_to_remove = [\"created_at\", \"created_by\"]\n for key in keys_to_remove:\n doc.pop(key)\n return doc\n\n\nasync def edit_question_db(question: QuestionIn, current_user: CurrentUserSchema):\n doc = await process_question(question, current_user, method=RequestMethod.EDIT)\n new_values = {\"$set\": doc}\n result = await collection.update_one({\"_id\": ObjectId(question.id)}, new_values)\n return f\"Updated {result.modified_count} question.\"\n\n\nasync def remove_questions_db(question_ids: list[str], current_user: CurrentUserSchema) -> str:\n query = {\"_id\": {\"$in\": [ObjectId(q) for q in question_ids]}, \"is_active\": True}\n linked_flows = await collection.distinct('answers.flow.flow_id', query)\n\n # delete questions\n set_query = {\n \"updated_at\": get_local_datetime_now(),\n \"updated_by\": ObjectId(current_user.userId),\n \"is_active\": False\n }\n result1 = await collection.update_many(query, {'$set': set_query})\n\n # delete flows related\n query = {\"_id\": {\"$in\": linked_flows}, \"name\": {\"$exists\": False}, \"is_active\": True}\n result2 = await flow_collection.update_many(query, {'$set': set_query})\n return f\"Removed {result1.modified_count} questions and {result2.modified_count} linked flows.\"\n\n\ndef get_question_cursor(field=None):\n projection = None\n query 
= {\"is_active\": True}\n if field:\n projection = {f: 1 for f in field.split(',')}\n projection['_id'] = 1\n return query, projection\n\n\nasync def get_question_filtered_field_list(field=None):\n query, projection = get_question_cursor(field)\n questions = []\n async for question in collection.find(query, projection=projection):\n questions.append(question_helper(question))\n return questions\n","repo_name":"brlala/adminhub_backend","sub_path":"app/server/db_utils/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":7517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2636812812","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\ntrain = pd.read_csv('titanic_train.csv')\n# FIND WHICH COLUMNS CONSIST OF MORE MISSING DATA\n# sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')\n# plt.show(sns)\n# IN THIS CASE IT WAS MISSING SOME OF AGE DATA AND A LOT OF CABIN DATA\n\n# sns.set_style('whitegrid')\n\n# SURVIVED VS NOT SURVIVED DATA\n# sns.countplot(x='Survived',data=train)\n# plt.show(sns)\n# SURVIVED PEOPLE BY GENDER\n# sns.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')\n# plt.show(sns)\n# SURVIVED PEOPLE BY PASSENGER CLASS\n# sns.countplot(x='Survived',hue='Pclass',data=train,palette='RdBu_r')\n\n# AMOUNT OF PASSENGERS BY AGE\nsns.distplot(train['Age'].dropna(), kde=False, bins=35)\n\n\n# plt.show(sns)\n# GET AVERAGE OF AGE BY CLASS IN ORDER TO REMOVE MISSING DATA FROM AGE CATEGORY\ndef impute_age(cols):\n age = cols[0]\n pcclass = cols[1]\n\n if pd.isnull(age):\n if pcclass == 1:\n return 37\n elif pcclass == 2:\n return 29\n else:\n return 24\n else:\n return age\n\n\ntrain['Age'] = train[['Age', 'Pclass']].apply(impute_age, axis=1)\n\n# sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='viridis')\n# plt.show(sns)\n\n# WE GONNA DROP CABIN COLUMNS CAUSE IT HAS TOO MUCH MISSING DATA\ntrain.drop('Cabin', axis=1, inplace=True)\n# DROPPING REMAINED NULL COLUMNS\ntrain.dropna(inplace=True)\n\n# CONVERT MALE/FEMALE TO NUMS 0 AND 1, CAUSE COMPUTER CAN'T USE STRING VALUES\n# NEXT STEP IS TO DROP ONE OF COLUMN (MALE OR FEMALE) NOT TO MESS UP EVERYTHING\nsex = pd.get_dummies(train['Sex'], drop_first=True)\n# DO THE SAME WITH EMBARKED COLUMN\nembark = pd.get_dummies(train['Embarked'], drop_first=True)\n# DO THE SAME WITH PCLASS COLUMN\n# pclass = pd.get_dummies(train['Pclass'], drop_first=True)\n# NEXT STEP IS TO CONCAT OUR NEW COLUMNS TO EXISTING ONES\ntrain = pd.concat([train, sex, embark], axis=1)\n# DROPPING SEX AND EMBARKED CAUSE WE ALREADY REPLACED THESE COLUMNS WITH NEW ONES\n# DROPPING NAME AND TICKET AND PASSENGER ID COLUMNS CAUSE THEY DON'T CONSIST OF USEFUL INFORMATION\ntrain.drop(['Sex', 'Embarked', 'Name', 'Ticket', 'PassengerId'], axis=1, inplace=True)\n# print(train)\n\n# DATA FOR PREDICTION\n# DROPPING SURVIVED COLUMN, AND GET EACH COLUMN EXCEPT SURVIVED\nX = train.drop('Survived', axis=1)\n# DATA NEED TO BE PREDICTED\ny = train['Survived']\n\n# SPLIT DATA FOR TRAIN AND TEST\n# TEST SIZE IS PERCENTAGE OF DATA USED IN TEST (in this case 30% of data)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)\n# CREATE MODEL\nlogmodel = LogisticRegression()\n# FIT 
MODEL ON TRAINING DATA\nlogmodel.fit(X_train, y_train)\n# PREDICT MODEL\npredictions = logmodel.predict(X_test)\n# CLASSIFICATION FINAL REPORT\nprint(classification_report(y_test, predictions))\n# print(confusion_matrix(y_test,predictions))\n\n\n# 1 TN / True Negative: when a case was negative and predicted negative\n# 2 TP / True Positive: when a case was positive and predicted positive\n# 3 FN / False Negative: when a case was positive but predicted negative\n# 4 FP / False Positive: when a case was negative but predicted positive\n\n# PRECISION\n# Precision is the ability of a classifier not to label an instance positive\n# that is actually negative. For each class it is defined as the ratio\n# of true positives to the sum of true and false positives.\n#\n# Precision is Accuracy of positive predictions.\n# Precision = TP/(TP + FP)\n\n# RECALL\n# Recall is the ability of a classifier to find all positive instances.\n# For each class it is defined as the ratio of true positives\n# to the sum of true positives and false negatives.\n#\n# Recall: Fraction of positives that were correctly identified.\n# Recall = TP/(TP+FN)\n\n# F1 SCORE\n# F1 score is a weighted harmonic mean of precision and recall such that\n# the best score is 1.0 and the worst is 0.0. Generally speaking,\n# F1 scores are lower than accuracy measures as they embed precision\n# and recall into their computation. As a rule of thumb, the weighted\n# average of F1 should be used to compare classifier models, not global accuracy.\n#\n# F1 Score = 2*(Recall * Precision) / (Recall + Precision)\n","repo_name":"GaoFan98/MLBootcamp","sub_path":"2 Logistic Regression/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69865680905","text":"import string\n\n\ndef lzw_encode(text):\n    chars_set = list(set(list(text)))\n    dictionary_size = len(chars_set)\n    dictionary = {chars_set[i]: i for i in range(dictionary_size)}\n    cur_str = \"\"\n    compressed_data = []\n\n    for symbol in text:\n        string_plus_symbol = cur_str + symbol\n        if string_plus_symbol in dictionary:\n            cur_str = string_plus_symbol\n        else:\n            compressed_data.append(dictionary[cur_str])\n            if len(dictionary) <= maximum_table_size:\n                dictionary[string_plus_symbol] = dictionary_size\n                dictionary_size += 1\n            cur_str = symbol\n\n    if cur_str in dictionary:\n        compressed_data.append(dictionary[cur_str])\n\n    return compressed_data, dictionary\n\n\nwith open(\"text.txt\", 'r') as f_in:\n    input_data = f_in.read().strip().lower().translate(str.maketrans('', '', string.punctuation))\n    maximum_table_size = 2 ** len(input_data)\n    c_data, dict_ = lzw_encode(input_data)\n\n    c_data = list(map(lambda x: bin(x)[2:], c_data))\n\n    print(c_data)\n\n    print(''.join(c_data))\n    print(len(input_data) * 8, 'Length of the original text, if 1 character = 8 bits')\n    print(len(''.join(c_data)), 'Length of the encoded text in bits')\n","repo_name":"udogaL2/dm_lab4","sub_path":"LZW.py","file_name":"LZW.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39471738695","text":"import discord\nimport math\nfrom math import sqrt\nfrom discord.ext import commands\nimport random\nimport os\nimport time\nimport wikipedia\n\n#prefix\nclient = commands.Bot(command_prefix = ('yeet.', 'yeet ', 't.', 'no '))\n#IDK\nfor file in os.listdir('./cogs'):\n    if file.endswith('.py'):\n        
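# Assumption worth noting: load_extension requires each cog module under\n        # ./cogs to define a setup(bot) entry point, or loading will raise.\n        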
client.load_extension(f'cogs.{file[:-3]}')\n#startup\n@client.event\nasync def on_ready():\n    print(\"bot has connected to discord\")\n    game = discord.Game('Minecraft')\n    await client.change_presence(status=discord.Status.idle, activity=game)\n\n# @client.command()\n# async def wiki(ctx, *, terms):\n#     try:\n#         searchterm = ' '.join(terms)\n#         await ctx.send(wikipedia.summary(searchterm, sentences = 3))\n#     except wikipedia.DisambiguationError:\n#         await ctx.send('Try being more specific.')\n    \n\n#maths\n@client.command()\nasync def add(ctx, a:int, b:int):\n    await ctx.send(a + b)\n\n@client.command()\nasync def multiply(ctx, a:int, b:int):\n    await ctx.send(a * b)\n\n@client.command()\nasync def subtract(ctx, a:int, b:int):\n    await ctx.send(a - b)\n\n@client.command()\nasync def divide(ctx, a:int, b:int):\n    await ctx.send(a / b)\n\n@client.command()\nasync def squarert(ctx, a:int):\n    await ctx.send(sqrt(a))\n\n@client.command()\nasync def square(ctx, a:int):\n    await ctx.send(a ** 2)\n\n\n\n\n@client.command()\nasync def eightball(ctx, a):\n    l = ['It is certain',\n    'It is decidedly so', \n    'Outlook good', \n    'Ask again later, you noob',\n    'Better not tell you now', \n    \"Don't count on it\", \n    'my sources say no', \n    'Very doubtful',\n    'What type of question is that?!',\n    'How do you not know the answer?!',\n    'Bruh']\n\n    n = random.randint(0, len(l) - 1)\n    await ctx.send(l[n]) \n    \n@client.command()\nasync def say(ctx, *, sentence):\n    await ctx.send(sentence)\n\n@client.command()\nasync def fib(ctx, a : int):\n    def fibonacci(a):\n\n        # fib(1) and fib(2) are both 1; returning here also avoids referencing\n        # current_num before assignment when the loop below does not run\n        if a <= 2:\n            return 1\n\n        old_num = 1\n        new_num = 1\n        \n        for i in range (a - 2):\n            \n            current_num = old_num + new_num\n            old_num = new_num\n            new_num = current_num\n        \n        return current_num\n    await ctx.send(fibonacci(a))\n    \n\n@client.command()\nasync def spam(ctx, b:int, *, a):\n    if b > 100:\n        await ctx.send(\"Sorry, that is too much. (max is 100)\")\n    elif b < 1:\n        await ctx.send(\"Sorry, that is too little.\")\n    else:\n        await ctx.send((a + '\\n') * b) \n\n@client.command(pass_context=True)\nasync def ping(ctx):\n    before = time.monotonic()\n    message = await ctx.send(\"Pong!\")\n    ping = (time.monotonic() - before) * 1000\n    await message.edit(content=f\"Pong! 
`{int(ping)}ms`\")\n\n@client.command(aliases = [\"delete\", \"clear\"])\nasync def purge(ctx, a:int):\n    await ctx.channel.purge(limit = a + 1)\n\n@client.command()\nasync def u(ctx):\n    await ctx.send('no u')\n\n@client.command()\nasync def embed(ctx, title, *, content):\n    embed = discord.Embed(\n        title = f'{title}',\n        description = f'{title}',\n        color = ctx.author.color\n    )\n    embed.add_field(name = '', value = f\"{content}\", inline = False)\n    await ctx.send(embed = embed)\n    \n\n\n# NOTE: the bot token is read from an environment variable (assumed name\n# DISCORD_TOKEN) rather than being hard-coded as a credential in the source.\nclient.run(os.environ['DISCORD_TOKEN'])","repo_name":"AndrewW-coder/codes","sub_path":"Bot/yeetbot.py","file_name":"yeetbot.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40942800259","text":"from SIM import SIM\r\nfrom numpy import linalg\r\nfrom GaussMethod import GaussMethod\r\n\r\n\r\nclass SquareRootMethod:\r\n    @staticmethod\r\n    def solve(equation):\r\n        a_t = SquareRootMethod.transpose_matrix([line[:-1] for line in equation.A])\r\n\r\n        new_b = SquareRootMethod.matrix_product(a_t, [line[-1:] for line in equation.A])\r\n        print(new_b)\r\n        equation.A = SquareRootMethod.matrix_product(a_t, [line[:-1] for line in equation.A])\r\n\r\n        u = SquareRootMethod.find_matrix_u(equation.A)\r\n        u_t = SquareRootMethod.transpose_matrix(u)\r\n\r\n        SIM.print_matrix(u)\r\n\r\n        y = SquareRootMethod.solve_system(u_t, new_b)\r\n        x = SquareRootMethod.solve_system(u, y, len(u) - 1, -1)\r\n        print('solution:')\r\n        SIM.print_matrix(x)\r\n\r\n    @staticmethod\r\n    def solve_system(matrix_a, vector_b, begin=0, step=1):\r\n        vector_x = [[0] for i in range(len(matrix_a))]\r\n        for i in range(begin, len(matrix_a) if begin == 0 else -1, step):\r\n            s = 0\r\n            for j in range(len(matrix_a)):\r\n                s -= matrix_a[i][j] * vector_x[j][0]\r\n            vector_x[i][0] = (vector_b[i][0] + s) / matrix_a[i][i]\r\n\r\n        return vector_x\r\n\r\n    @staticmethod\r\n    def matrix_product(matrix_a, matrix_b):\r\n        matrix_c = []\r\n        for i in range(len(matrix_a)):\r\n            matrix_c.append([])\r\n            for j in range(len(matrix_b[0])):\r\n                matrix_c[i].append(0)\r\n                for k in range(len(matrix_b)):\r\n                    matrix_c[i][j] += matrix_a[i][k] * matrix_b[k][j]\r\n\r\n        # SIM.print_matrix(matrix_c)\r\n        return matrix_c\r\n\r\n    @staticmethod\r\n    def transpose_matrix(matrix):\r\n        matrix_t = []\r\n        for i in range(len(matrix)):\r\n            matrix_t.append([])\r\n            for j in range(len(matrix)):\r\n                matrix_t[i].append(matrix[j][i])\r\n\r\n        return matrix_t\r\n\r\n    @staticmethod\r\n    def find_matrix_u(matrix):\r\n        n = len(matrix)\r\n\r\n        u = [[0] * n for i in range(n)]\r\n\r\n        for i in range(n):\r\n            for j in range(n):\r\n                if j < i:\r\n                    continue\r\n                s = 0\r\n                if i == j:\r\n                    for k in range(n):\r\n                        s += u[k][i] ** 2\r\n                    u[i][j] = (matrix[i][j] - s) ** 0.5\r\n                    continue\r\n\r\n                else:\r\n                    for k in range(i):\r\n                        s += u[k][i] * u[k][j]\r\n\r\n                    u[i][j] = 1 / u[i][i] * (matrix[i][j] - s)\r\n        return u\r\n\r\n    @staticmethod\r\n    def matrix_det(matrix):\r\n        det = 1\r\n        matrix_u = SquareRootMethod.find_matrix_u(matrix)\r\n        for i in range(len(matrix_u)):\r\n            det *= matrix_u[i][i] ** 2.0\r\n\r\n        print('determinant check (ours vs numpy):', det, linalg.det(matrix))\r\n\r\n        return det\r\n\r\n    @staticmethod\r\n    def find_inverse_matrix(matrix):\r\n        e = [[\r\n            1 if i == j else 0 for j in range(len(matrix))\r\n        ]\r\n            for i in range(len(matrix))\r\n        ]\r\n\r\n        matrix = SquareRootMethod.matrix_product(SquareRootMethod.transpose_matrix(matrix), matrix)\r\n\r\n        u = SquareRootMethod.find_matrix_u(matrix)\r\n        u_t = SquareRootMethod.transpose_matrix(u)\r\n\r\n        SIM.print_matrix(u)\r\n        SIM.print_matrix(u_t)\r\n        y = []\r\n\r\n        for i in range(len(matrix)):\r\n            y.append(SquareRootMethod.solve_system(u_t, [line[i:i+1] for line in e]))\r\n\r\n        for i in range(len(matrix)):\r\n            
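# Each pass solves one column of the inverse by back substitution, but the\r\n            # columns below are only printed; the method never assembles or returns them.\r\n            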
print(SquareRootMethod.solve_system(u, y[i], len(u) - 1, -1))\n\n SIM.print_matrix(linalg.inv(matrix))\n\n\n","repo_name":"gwyrwch/math-methods-labs","sub_path":"SquareRootMethod.py","file_name":"SquareRootMethod.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72589681866","text":"# Third Party\nfrom django.http import HttpResponse\n\n# Library\nfrom app.services import (\n create_line_schedule,\n create_line_station,\n get_schedule,\n get_stations,\n save_or_update_schedules,\n save_or_update_stations,\n)\n\ndef list_stations(request, latitude: str, longitude: str) -> HttpResponse:\n \"\"\"Список станций.\"\"\"\n response = get_stations(latitude, longitude)\n response.raise_for_status()\n save_or_update_stations(response.json()['stations'])\n\n station_list_response = []\n for station in response.json()['stations']:\n code = station['code']\n title = station['title']\n station_type_name = station['station_type_name']\n station_list_response.append(\n create_line_station(code, title, station_type_name)\n )\n return HttpResponse(''.join(station_list_response))\n\n\ndef station(request, code: str):\n \"\"\"Расписание станции.\"\"\"\n response = get_schedule(code)\n response.raise_for_status()\n save_or_update_schedules(response.json()['schedule'], code)\n\n schedule_list_response = []\n for schedule in response.json()['schedule']:\n title = schedule['thread']['title']\n days = schedule['days']\n arrival = schedule['arrival']\n departure = schedule['departure']\n schedule_list_response.append(\n create_line_schedule(title, days, arrival, departure)\n )\n if not schedule_list_response:\n schedule_list_response = 'Ничего нет.'\n return HttpResponse(''.join(schedule_list_response))\n","repo_name":"12masek34/transport_schedules","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20627683444","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport locale\nlocale.setlocale(locale.LC_ALL,'en_US.UTF-8')\nimport tokenizer, zemberek.normalizer, zemberek.stemmer, stopper\n\ndef perform_preprocessing(text):\n\ttext = zemberek.normalizer.normalize(text)\n\ttokens = tokenizer.tokenize(text)\n\ttokens = stopper.remove_stops(tokens)\n\ttokens = zemberek.stemmer.stem_words(tokens)\n\ttokens = stopper.remove_stops(tokens)\n\treturn tokens\n\n","repo_name":"doruksahin/KontroleDegerMi","sub_path":"py-work/pipeline/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"37404699662","text":"\"\"\"\nexample:\n\tpython remove_comma.py --fname=./embedding_1_embeddings.txt --print_length=True\n\"\"\"\n\nimport argparse \n\nif __name__ == \"__main__\":\n\n\tparser = argparse.ArgumentParser() \n\tparser.add_argument('--fname', type=str, default=\"\", \n\t\thelp=\"the input txt file name\") \n\tparser.add_argument('--print_length', type=bool, default=True,\n\t\thelp=\"whether to print length of input data\")\n\targs = parser.parse_args() \n\tfname = args.fname \n\tprint_length = args.print_length\n\n\twith open(fname, 'r') as myfile:\n\t data = myfile.read().split(\",\")\n\t if print_length:\n\t \tcount = len(data)\n\t \tprint(\"length of {}: {}\".format(fname[:-4], count))\n\n\twith open(fname, 'w') as myfile:\n\t for 
elements in data:\n\t myfile.write(\"%s\" % elements)","repo_name":"WenqiJiang/FPGA-Based-RNN-Accelerator-Using-Vivado-HLS","sub_path":"outdated/h5/remove_comma.py","file_name":"remove_comma.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"13065638834","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule containing operation for restructuring Community / Nabolag JSON\n\n\"\"\"\n\n__author__ = 'Samir Adrik'\n__email__ = 'saÿmir.adrik@gmail.com'\n\nfrom source.util import Tracking, Assertor\n\nfrom .operation import Operation\n\n\nclass Restructure(Operation):\n \"\"\"\n Operation for restructuring Community / Nabolag JSON\n\n \"\"\"\n\n _categories = {\"AllmenneFag\": \"Allmennefaglig\", \"HelseSosial\": \"Helse og sosial\",\n \"Okonomiske\": \"Økonomiske fag\", \"NatVit\": \"Naturvitenskapelig\",\n \"HumanEstetikk\": \"Human og estetikk\", \"Samfunn\": \"Samfunnsfaglig\",\n \"Laerer\": \"Lærerutdanning\", \"Primaer\": \"Primærnæring\",\n \"Andre\": \"Andre / uoppgitt\",\n \"0-12\": \"0 år - 12 år\", \"13-18\": \"13 år - 18 år\", \"19-34\": \"19 år - 34 år\",\n \"35-64\": \"35 år - 64 år\", \"65+\": \"65+ år\",\n \"0-5\": \"0 år - 5 år\", \"6-12\": \"6 år - 12 år\",\n \"13-15\": \"13 år - 15 år\", \"16-18\": \"16 år - 18 år\",\n \"NotMarried\": \"Ugift\", \"Married\": \"Gift\", \"Separated\": \"Separert\",\n \"Widow\": \"Enke\",\n \"0-100000\": \"0 - 100K\", \"100000-200000\": \"100K - 200K\",\n \"200000-400000\": \"200K - 400K\", \"400000-500000\": \"400K - 500K\",\n \"500000-800000\": \"500K - 800K\", \"800000+\": \"800K+\",\n \"0-2000000\": \"0 - 2 mill\", \"2000000-3000000\": \"2 mill - 3 mill\",\n \"3000000-4000000\": \"3 mill - 4 mill\", \"4000000-5000000\": \"4 mill - 5 mill\",\n \"5000000-6000000\": \"5 mill - 6 mill\", \"6000000+\": \"6 mill+\",\n \"CouplesWithChildren\": \"Par med barn\", \"CouplesWithoutChildren\": \"Par uten barn\",\n \"SingleWithChildren\": \"Single med barn\", \"MultiFamilies\": \"Flerfamilie\",\n \"SingleWithoutChildren\": \"Single uten barn\",\n \"owns\": \"Eier\",\n \"rents\": \"Leier\",\n \"0-60\": \"0 - 60 m\\u00b2\",\n \"60-120\": \"60 m\\u00b2 - 120 m\\u00b2\",\n \"120-200\": \"120 m\\u00b2 - 200 m\\u00b2\",\n \"200+\": \"200 m\\u00b2+\",\n \"0-10\": \"0 år - 10 år\",\n \"10-30\": \"10 år - 30 år\",\n \"30-50\": \"30 år - 50 år\",\n \"50+\": \"50+ år\"}\n\n @Tracking\n def __init__(self, data: dict, desc: str):\n \"\"\"\n Constructor / Instantiate the class.\n\n Parameters\n ----------\n data : dict\n data to be restructured\n desc : str\n description of operation\n\n \"\"\"\n self.name = self.__class__.__name__\n Assertor.assert_data_types([data, desc], [dict, str])\n super().__init__(name=self.name, desc=\"id: {}\".format(desc))\n self.data = data\n\n @Tracking\n def run(self):\n \"\"\"\n method for running operation\n\n \"\"\"\n group = []\n neighborhood = []\n city = []\n for keys, values in self.data.items():\n if keys in (\"values\", \"stats\"):\n for value in values:\n for key, val in value.items():\n if key == \"group\":\n if val == \"People\":\n group.append(\"Befolkning\")\n elif val == \"Households\":\n group.append(\"Husholdninger\")\n else:\n group.append(\n self._categories[val].capitalize()\n if val in self._categories.keys() else val.capitalize())\n elif key in (\"percent\", \"total\"):\n for prop, elem in val.items():\n if prop == \"neighborhood\":\n neighborhood.append(elem)\n elif prop == \"city\":\n city.append(elem)\n data = 
{self.data.copy()[\"id\"].lower(): {\"Gruppe\": group,\n                                         \"Nabolag\": neighborhood,\n                                         \"By\": city if city else [0 for _ in range(\n                                             len(neighborhood))]}}\n        return data\n","repo_name":"seemir/stressa","sub_path":"source/app/processing/engine/restructure.py","file_name":"restructure.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43699996372","text":"from __future__ import absolute_import\n\nimport numpy as np\nfrom perceptron.models.base import DifferentiableModel\n\n\nclass GaussianModel(DifferentiableModel):\n    \"\"\"Creates a :class:`Model` instance from a `Gaussian` module.\n\n    Parameters\n    ----------\n    model : `torch.nn.Module`\n        The PyTorch model that is loaded.\n    bounds : tuple\n        Tuple of lower and upper bound for the pixel values, usually\n        (0, 1) or (0, 255).\n    num_classes : int\n        Number of classes for which the model will output predictions.\n    channel_axis : int\n        The index of the axis that represents color channels.\n    device : string\n        A string specifying the device to do computation on.\n        If None, will default to \"cuda:0\" if torch.cuda.is_available()\n        or \"cpu\" if not.\n    preprocessing: 2-element tuple with floats or numpy arrays\n        Elementwise preprocessing of input; we first subtract the first\n        element of preprocessing from the input and then divide the input by\n        the second element.\n    \"\"\"\n\n    def __init__(\n            self,\n            model,\n            bounds=(0, 1),\n            channel_axis=1,\n            std=0.3,\n            iterations=100,\n            preprocessing=(0, 1)):\n\n        super(GaussianModel, self).__init__(bounds=bounds,\n                                            channel_axis=channel_axis,\n                                            preprocessing=preprocessing)\n        self._model = model\n        self._num_classes = self._model.num_classes()\n        self._iterations = iterations\n        self._std = std\n\n    def batch_predictions(self, images):\n        \"\"\"Batch prediction of images.\"\"\"\n        # lazy import\n        import torch\n\n        images, _ = self._process_input(images)\n        n = len(images)\n        labels = np.empty(shape=(n,), dtype=np.int32)\n        bounds = np.empty(shape=(n,), dtype=np.float32)\n        for i in range(n):\n            labels[i], bounds[i] = self.predictions(images[i])\n\n        return labels, bounds\n\n    def predictions(self, image, forward_batch_size=32):\n        from scipy.stats import norm\n        image, _ = self._process_input(image)\n        image_batch = np.vstack([[image]] * self._iterations)\n        noise = np.random.normal(scale=self._std, size=image_batch.shape).astype(np.float32)\n        image_batch += noise\n        predictions = self._model.batch_predictions(image_batch)\n        logits = np.argmax(predictions, axis=1)\n        one_hot = np.zeros([self._iterations, self._num_classes])\n        logits_one_hot = np.eye(self._num_classes)[logits]\n        one_hot += logits_one_hot\n        one_hot = np.sum(one_hot, axis=0)\n        ranks = sorted(one_hot / np.sum(one_hot))[::-1]\n        qi = ranks[0] - 1e-9\n        qj = ranks[1] + 1e-9\n        bound = self._std / 2. * (norm.ppf(qi) - norm.ppf(qj))\n        return np.argmax(one_hot), bound\n\n    def num_classes(self):\n        \"\"\"Return number of classes.\"\"\"\n        return self._num_classes\n\n    def model_task(self):\n        \"\"\"Get the task that the model is used for.\"\"\"\n        return self._model.model_task()\n\n    def predictions_and_gradient(self, image, label):\n        \"\"\"Returns both predictions and gradients.\"\"\"\n        return self._model.predictions_and_gradient(image, label)\n\n    def _loss_fn(self, image, label):\n        return self._model._loss_fn(image, label)\n\n    def backward(self, gradient, image):\n        \"\"\"Get gradients w.r.t. 
the original image.\"\"\"\n        # delegate to the wrapped model\n        return self._model.backward(gradient, image)\n","repo_name":"advboxes/perceptron-benchmark","sub_path":"perceptron/models/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"81"} +{"seq_id":"39323568031","text":"\"\"\"\nProblem Statement : Python program to read the contents of a file\n\"\"\"\n\"\"\"\nProblem Solution:\n1. Take the file name from the user.\n2. Use readline() function for the first line first.\n3. Use a while loop to print the first line and then read the remaining lines and print it till the end of file.\n4. Exit.\n\"\"\"\n\nfname = input(\"Enter the name of the file with .txt extension: \")\nfopen = open(fname, 'r')\nline = fopen.readline()\nwhile line != \"\":\n    print(line)\n    line = fopen.readline()\nfopen.close()\n","repo_name":"rajesh1994/python_program_excercise","sub_path":"9. Python Programs on File Handling/1_python_program_to_read_the_contents_of_a_file.py","file_name":"1_python_program_to_read_the_contents_of_a_file.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36448461151","text":"from sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import StratifiedKFold, KFold\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import backend as K\nfrom imblearn.over_sampling import RandomOverSampler\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom imblearn.over_sampling import SMOTENC\nimport pandas as pd\nimport numpy as np\n\n\ndef run_bootstrap_stratified_validation(\n    model_df,\n    ml_model,\n    features_columns,\n    if_scale_data,\n    if_one_hot,\n    model_type=\"sklearn\",\n    stratify_col='facility_type_parsed',\n    response_col=\"site_eui\",\n    if_output_prediction_results=False,\n    resample_param_dict={},\n    imputer=None,\n    n_bootstraps=5,\n):\n    # Sample the train/validation dataset so it resembles true test distribution\n    bootstrap_strat_df = pd.DataFrame()\n    for i in range(n_bootstraps):\n        print(f\"Starting bootstrap {i}\")\n        sub_model_df = heuristic_sample_to_true_test(\n            model_df, col_to_resample='facility_type_parsed')\n        stratified_model_result_df = run_stratified_validation(sub_model_df, ml_model, features_columns,\n                                                               if_scale_data, if_one_hot, model_type=model_type,\n                                                               stratify_col=stratify_col, response_col=response_col,\n                                                               if_output_prediction_results=if_output_prediction_results,\n                                                               resample_param_dict=resample_param_dict,\n                                                               imputer=imputer)\n        print(type(stratified_model_result_df), stratified_model_result_df)\n        bootstrap_strat_df = pd.concat(\n            [bootstrap_strat_df, stratified_model_result_df.assign(bootstrap=i)])\n    return bootstrap_strat_df\n\n\ndef run_stratified_validation(\n    model_df,\n    ml_model,\n    features_columns,\n    if_scale_data,\n    if_one_hot,\n    model_type=\"sklearn\",\n    stratify_col='facility_type_parsed',\n    response_col=\"site_eui\",\n    if_output_prediction_results=False,\n    resample_param_dict={},\n    imputer=None,\n):\n    # Define which function to run\n    run_model_dict = {\n        \"sklearn\": run_sklearn_model,\n        \"catboost\": run_catboost_model,\n        \"lightgbm\": run_lgb_model,\n        \"dnn\": run_dnn_model,\n    }\n    assert model_type in run_model_dict.keys(\n    ), f\"{model_type} not in {run_model_dict.keys()}\"\n\n    
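# Results are accumulated per stratified fold below and concatenated at the end.\n    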
all_stratified_model_result = []\n prediction_result_train_dict = {}\n prediction_result_test_dict = {}\n print(f\"Running {model_type}\")\n\n # Instantiate stratified kfold model\n skf = StratifiedKFold()\n # Note stratify_y is not the target for regression, but rather the target\n # for stratifying train/test split for validation (eg facility type)\n X = model_df.drop(columns=stratify_col)\n stratify_y = model_df[stratify_col].map(\n dict(\n zip(\n np.sort(model_df[stratify_col].unique()),\n np.arange(model_df[stratify_col].nunique()),\n )\n )\n ).values\n\n # Each train/test split is ~4:1 equal ratio of stratify_y\n for strat, (train_inds, test_inds) in enumerate(skf.split(X.values, stratify_y)):\n print(f\"Modeling strat {strat}\")\n # Prep/preprocess\n train_df = model_df.iloc[train_inds]\n test_df = model_df.iloc[test_inds]\n train_x_df, train_y_df = split_model_feature_response(\n train_df, features_columns, response_col=response_col\n )\n test_x_df, test_y_df = split_model_feature_response(\n test_df, features_columns, response_col=response_col\n )\n train_x_df, test_x_df = process_train_test_data(\n train_x_df, test_x_df, if_scale_data, if_one_hot, model_df, imputer=imputer\n )\n # Run regression model\n train_predict, test_predict, fitted_model = run_model_dict[model_type](\n ml_model, train_x_df, train_y_df, test_x_df,\n validation_data=(test_x_df.values,\n test_y_df.values),\n )\n # Calculate training & validation metrics\n train_rmse = calculate_rmse(train_y_df, train_predict)\n test_rmse = calculate_rmse(test_y_df, test_predict)\n stratified_result_df = pd.DataFrame(\n {\n \"stratification\": strat,\n \"train_rmse\": train_rmse,\n \"test_rmse\": test_rmse,\n },\n index=[0],\n )\n all_stratified_model_result.append(stratified_result_df)\n prediction_result_train_dict[strat] = train_predict\n prediction_result_test_dict[strat] = test_predict\n all_stratified_model_result_df = pd.concat(\n all_stratified_model_result).reset_index(drop=True)\n if if_output_prediction_results:\n return all_stratified_model_result_df, prediction_result_train_dict, prediction_result_test_dict\n else:\n return all_stratified_model_result_df\n\n\ndef run_leave_year_out(\n model_df,\n ml_model,\n features_columns,\n if_scale_data,\n if_one_hot,\n model_type=\"sklearn\",\n response_col=\"site_eui\",\n if_output_prediction_results=False,\n resample_param_dict={},\n imputer=None,\n):\n # Define which function to run\n run_model_dict = {\n \"sklearn\": run_sklearn_model,\n \"catboost\": run_catboost_model,\n \"lightgbm\": run_lgb_model,\n \"dnn\": run_dnn_model,\n }\n assert model_type in run_model_dict.keys(\n ), f\"{model_type} not in {run_model_dict.keys()}\"\n all_loy_model_result = []\n all_year = model_df[\"year_factor\"].unique()\n prediction_result_train_dict = {}\n prediction_result_test_dict = {}\n print(f\"Running {model_type}\")\n for one_year in all_year:\n print(f\"Modeling {one_year}...\")\n (\n left_out_test_x_df,\n left_out_test_y_df,\n left_out_train_x_df,\n left_out_train_y_df,\n ) = train_test_split(one_year, model_df, features_columns, response_col)\n if len(resample_param_dict) > 0:\n train_for_resample_df = left_out_train_x_df\n train_for_resample_df[response_col] = left_out_train_y_df\n up_or_downsample = resample_param_dict[\"up_or_downsample\"]\n resample_by_col = resample_param_dict[\"resample_by_col\"]\n resample_type = resample_param_dict[\"resample_type\"]\n if up_or_downsample == \"upsample\":\n train_after_resampled_df = upsampling_by_column(\n train_for_resample_df, 
resample_by_col, resample_type=resample_type\n )\n elif up_or_downsample == \"downsample\":\n train_after_resampled_df = downsampling_by_column(\n train_for_resample_df, resample_by_col, resample_type=resample_type\n )\n elif up_or_downsample == \"custom_upsample\":\n print(\"getting custom upsample\")\n df_to_get_weights = model_df.query(\n f\"year_factor != {one_year}\")\n train_after_resampled_df = custom_weighted_upsample(\n train_for_resample_df, df_to_get_weights, resample_by_col)\n left_out_train_x_df, left_out_train_y_df = split_model_feature_response(\n train_after_resampled_df,\n features_columns,\n if_with_response=True,\n response_col=response_col,\n )\n left_out_train_x_df, left_out_test_x_df = process_train_test_data(\n left_out_train_x_df, left_out_test_x_df, if_scale_data, if_one_hot, model_df, imputer=imputer\n )\n train_predict, test_predict, fitted_model = run_model_dict[model_type](\n ml_model, left_out_train_x_df, left_out_train_y_df, left_out_test_x_df,\n validation_data=(left_out_test_x_df.values,\n left_out_test_y_df.values),\n )\n train_rmse = calculate_rmse(left_out_train_y_df, train_predict)\n test_rmse = calculate_rmse(left_out_test_y_df, test_predict)\n one_year_result_df = pd.DataFrame(\n {\n \"left_out_year\": one_year,\n \"train_rmse\": train_rmse,\n \"test_rmse\": test_rmse,\n },\n index=[0],\n )\n all_loy_model_result.append(one_year_result_df)\n prediction_result_train_dict[one_year] = train_predict\n prediction_result_test_dict[one_year] = test_predict\n all_loy_model_result_df = pd.concat(\n all_loy_model_result).reset_index(drop=True)\n if if_output_prediction_results:\n return all_loy_model_result_df, prediction_result_train_dict, prediction_result_test_dict\n else:\n return all_loy_model_result_df\n\n\ndef train_test_split(level, model_df, features_columns, response_col=\"site_eui\"):\n left_out_test = model_df.query(f\"year_factor == {level}\")\n left_out_train = model_df.query(f\"year_factor != {level}\")\n left_out_test_x_df, left_out_test_y_df = split_model_feature_response(\n left_out_test, features_columns, response_col=response_col\n )\n left_out_train_x_df, left_out_train_y_df = split_model_feature_response(\n left_out_train, features_columns, response_col=response_col\n )\n return (\n left_out_test_x_df,\n left_out_test_y_df,\n left_out_train_x_df,\n left_out_train_y_df,\n )\n\n\ndef split_model_feature_response(\n model_df, features_columns, if_with_response=True, response_col=\"site_eui\"\n):\n model_x_df = model_df[features_columns]\n if if_with_response:\n model_y_df = model_df[response_col]\n return model_x_df, model_y_df\n else:\n return model_x_df\n\n\ndef one_hot_encode_data(train_x_df, test_x_df, full_data_df):\n categorical_columns_to_dummy = output_non_numeric_columns(train_x_df)\n # print(f\"Columns to be dummied: {categorical_columns_to_dummy}\")\n for col in categorical_columns_to_dummy:\n encoder = get_one_hot_encoder(full_data_df[[col]])\n one_hot_encoded_column_name = [\n f\"{col}_{ind}\" for ind in range(full_data_df[col].nunique())\n ]\n train_one_hot_encoded = encoder.transform(train_x_df[[col]])\n train_one_hot_encoded = pd.DataFrame(\n train_one_hot_encoded,\n columns=one_hot_encoded_column_name,\n index=train_x_df.index,\n )\n test_one_hot_encoded = encoder.transform(test_x_df[[col]])\n test_one_hot_encoded = pd.DataFrame(\n test_one_hot_encoded,\n columns=one_hot_encoded_column_name,\n index=test_x_df.index,\n )\n train_x_df = pd.concat(\n [train_x_df, train_one_hot_encoded], axis=\"columns\")\n test_x_df = 
pd.concat(\n [test_x_df, test_one_hot_encoded], axis=\"columns\")\n train_x_df = train_x_df.drop(columns=categorical_columns_to_dummy)\n test_x_df = test_x_df.drop(columns=categorical_columns_to_dummy)\n return train_x_df, test_x_df\n\n\ndef process_train_test_data(train_x_df, test_x_df, if_scale_data, if_one_hot, full_data_df, imputer=None):\n if if_one_hot:\n train_x_df, test_x_df = one_hot_encode_data(\n train_x_df, test_x_df, full_data_df)\n if if_scale_data:\n train_x_df, test_x_df = scale_data(train_x_df, test_x_df)\n if imputer:\n train_x_df, test_x_df = run_imputer(\n imputer, train_x_df, test_x_df, full_data_df)\n return train_x_df, test_x_df\n\n\ndef run_imputer(imputer, train_x_df, test_x_df, full_data_df):\n # Pre-process categorical features -> one hot encoding\n categorical_columns_to_dummy = output_non_numeric_columns(train_x_df)\n if categorical_columns_to_dummy:\n train_x_df, test_x_df = one_hot_encode_data(\n train_x_df.copy(), test_x_df.copy(), full_data_df)\n # Run imputer\n train_x_impute_df = imputer.fit_transform(train_x_df)\n test_x_impute_df = imputer.transform(test_x_df)\n\n train_x_impute_df = pd.DataFrame(\n train_x_impute_df, columns=train_x_df.columns\n )\n test_x_impute_df = pd.DataFrame(\n test_x_impute_df, columns=test_x_df.columns\n )\n # Reverse one-hot encoding -> categorical features\n for col in categorical_columns_to_dummy:\n one_hot_encoded_column_name = [\n f\"{col}_{ind}\" for ind in range(full_data_df[col].nunique())\n ]\n one_hot_to_cat_dict = dict(zip(one_hot_encoded_column_name,\n np.sort(full_data_df[col].unique())))\n train_x_impute_df[col] = train_x_impute_df[one_hot_encoded_column_name].idxmax(\n 1).map(one_hot_to_cat_dict)\n test_x_impute_df[col] = test_x_impute_df[one_hot_encoded_column_name].idxmax(\n 1).map(one_hot_to_cat_dict)\n train_x_impute_df = train_x_impute_df.drop(\n columns=one_hot_encoded_column_name)\n test_x_impute_df = test_x_impute_df.drop(\n columns=one_hot_encoded_column_name)\n return train_x_impute_df, test_x_impute_df\n\n\ndef output_non_numeric_columns(model_df):\n numeric_columns = list(model_df._get_numeric_data().columns)\n all_columns = list(model_df.columns)\n non_numeric_columns = list(set(all_columns) - set(numeric_columns))\n return non_numeric_columns\n\n\ndef scale_data(train_x, test_x):\n scaler = StandardScaler()\n scaler = scaler.fit(train_x)\n scaled_train_x = scaler.transform(train_x)\n scaled_test_x = scaler.transform(test_x)\n scaled_train_x = pd.DataFrame(\n scaled_train_x, columns=train_x.columns, index=train_x.index)\n scaled_test_x = pd.DataFrame(\n scaled_test_x, columns=test_x.columns, index=test_x.index)\n return scaled_train_x, scaled_test_x\n\n\ndef get_one_hot_encoder(train_df):\n enc = OneHotEncoder(sparse=False)\n return enc.fit(train_df)\n\n\ndef run_sklearn_model(sklearn_model, train_x_df, train_y_df, test_x_df, validation_data=None):\n fitted_model = fit_sklearn_model(sklearn_model, train_x_df, train_y_df)\n train_predict = run_sklearn_predict(fitted_model, train_x_df)\n test_predict = run_sklearn_predict(fitted_model, test_x_df)\n return train_predict, test_predict, fitted_model\n\n\ndef fit_lgb_model(model, train_x, train_y):\n # fit_params = {\n # \"early_stopping_rounds\": 100,\n # \"eval_metric\": \"rmse\",\n # # \"eval_set\": [(X_eval, y_eval)],\n # \"eval_names\": [\"valid\"],\n # \"verbose\": 1000,\n # }\n # model.fit(train_x, train_y, **fit_params)\n model.fit(train_x, train_y)\n return model\n\n\ndef run_lgb_model(lgb_model, train_x_df, train_y_df, test_x_df, 
validation_data=None):\n fitted_model = fit_lgb_model(lgb_model, train_x_df, train_y_df)\n train_predict = run_sklearn_predict(fitted_model, train_x_df)\n test_predict = run_sklearn_predict(fitted_model, test_x_df)\n return train_predict, test_predict, fitted_model\n\n\ndef fit_sklearn_model(model, train_x, train_y):\n model.fit(train_x, train_y)\n return model\n\n\ndef run_sklearn_predict(model, test_x):\n predict_result = model.predict(test_x)\n return predict_result\n\n\ndef calculate_rmse(true_y, predict_y):\n return mean_squared_error(true_y, predict_y, squared=False)\n\n\ndef run_catboost_model(model, train_x_df, train_y_df, test_x_df, validation_data=None):\n cat_columns = train_x_df.select_dtypes([\"O\"]).columns.tolist()\n model.fit(train_x_df, y=train_y_df, cat_features=cat_columns)\n train_predict = model.predict(train_x_df)\n test_predict = model.predict(test_x_df)\n return train_predict, test_predict, model\n\n\ndef run_dnn_model(model, train_x_df, train_y_df, test_x_df, validation_data=None):\n if model is None:\n model = build_and_compile_dnn_model(train_x_df)\n validation_split = 0 if validation_data else 0.2\n # Stop training if loss doesn't improve from min value over 10 iterations\n callback = tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', mode='min', patience=10, restore_best_weights=True)\n # Fit nn - note fitting will essentially pick up from last state\n\n model.fit(train_x_df, train_y_df, verbose=1, validation_data=validation_data,\n validation_split=validation_split,\n epochs=100, callbacks=[callback])\n train_predict = model.predict(train_x_df)\n test_predict = model.predict(test_x_df)\n return train_predict, test_predict, model\n\n\ndef build_and_compile_dnn_model(input_features):\n normalizer = tf.keras.layers.Normalization(axis=-1)\n normalizer.adapt(np.array(input_features))\n\n model = keras.Sequential([\n normalizer,\n layers.Dense(64, activation='elu'),\n layers.Dense(64, activation='elu'),\n layers.Dense(1)\n ])\n\n model.compile(loss=tf_rmse, # 'mean_absolute_error',\n optimizer=tf.keras.optimizers.Adam(0.001),\n metrics=[tf.keras.metrics.RootMeanSquaredError()])\n return model\n\n\ndef tf_rmse(y_true, y_pred):\n return K.sqrt(K.mean(K.square(y_true - y_pred)))\n\n\ndef tf_rmsle(y_true, y_pred):\n return K.sqrt(K.mean(K.square(K.log(1.+y_true) - K.log(1+y_pred))))\n\n\ndef upsampling_by_column(train_df, resample_by_col, resample_type=\"random\"):\n if resample_type == \"random\":\n train_x_to_resample = train_df.drop(columns=resample_by_col)\n train_y_to_resample = train_df[resample_by_col]\n oversampler = RandomOverSampler(random_state=42)\n train_x_resampled, train_y_resampled = oversampler.fit_resample(\n train_x_to_resample, train_y_to_resample\n )\n elif resample_type == \"smote\":\n train_dropna_df = train_df.dropna(how=\"any\")\n train_x_to_resample = train_dropna_df.drop(columns=resample_by_col)\n train_y_to_resample = train_dropna_df[resample_by_col]\n non_numeric_columns = output_non_numeric_columns(train_x_to_resample)\n categorical_column_index = [\n train_x_to_resample.columns.get_loc(c)\n for c in non_numeric_columns\n if c in train_x_to_resample\n ]\n sm = SMOTENC(random_state=42,\n categorical_features=categorical_column_index)\n train_x_resampled, train_y_resampled = sm.fit_resample(\n train_x_to_resample, train_y_to_resample\n )\n final_resampled_train_df = train_x_resampled\n final_resampled_train_df[resample_by_col] = train_y_resampled\n return final_resampled_train_df\n\n\ndef custom_weighted_upsample(df_to_resample, 
df_to_get_weights, resample_by_col):\n # get the levels that needs upsampled and the needed sample number\n column_level_counts = df_to_resample[resample_by_col].value_counts()\n final_sampled_number = column_level_counts.max()\n column_level_to_resample = column_level_counts[column_level_counts !=\n final_sampled_number]\n column_level_number_to_resample = final_sampled_number - column_level_to_resample\n # upsample per level\n all_resampled_list = []\n for level, number in column_level_number_to_resample.iteritems():\n level_to_resample_from_df = df_to_resample.query(\n f\"{resample_by_col} == '{level}'\")\n level_resample_weights = df_to_get_weights.query(f\"{resample_by_col} == '{level}'\")[\n \"resample_weights\"\n ].values\n assert level_to_resample_from_df.shape[0] == len(\n level_resample_weights)\n resampled_data = level_to_resample_from_df.sample(\n n=number, replace=True, weights=level_resample_weights, random_state=None, axis=0\n )\n all_resampled_list.append(resampled_data)\n # combine original data with upsampled data\n all_resampled_df = pd.concat(all_resampled_list)\n final_resampled_data = pd.concat([df_to_resample, all_resampled_df])\n return final_resampled_data\n\n\ndef downsampling_by_column(train_df, resample_by_col, resample_type=\"random\"):\n train_x_to_resample = train_df.drop(columns=resample_by_col)\n train_y_to_resample = train_df[resample_by_col]\n if resample_type == \"random\":\n undersampler = RandomUnderSampler(\n random_state=42, sampling_strategy=\"majority\")\n train_x_resampled, train_y_resampled = undersampler.fit_resample(\n train_x_to_resample, train_y_to_resample\n )\n final_resampled_train_df = train_x_resampled\n final_resampled_train_df[resample_by_col] = train_y_resampled\n return final_resampled_train_df\n\n\ndef run_model_predict_unknown_test_by_column(\n train_df, test_df, full_data_df, features_columns, response_col, if_scale, if_one_hot, model\n):\n all_year_factor = train_df[\"year_factor\"].unique()\n test_prediction_result = []\n for one_year in all_year_factor:\n print(f\"Modeling {one_year}...\")\n train_filter_df = train_df.query(f\"year_factor != {one_year}\")\n test_filter_df = test_df.query(f\"year_factor == {one_year}\")\n train_filter_x_df, train_filter_y_df = split_model_feature_response(\n train_filter_df, features_columns, if_with_response=True, response_col=response_col\n )\n test_filter_x_df = split_model_feature_response(\n test_filter_df, features_columns, if_with_response=False\n )\n processed_train_x_df, processed_test_x_df = process_train_test_data(\n train_filter_x_df, test_filter_x_df, if_scale, if_one_hot, full_data_df\n )\n train_predict, test_predict, fitted_model = run_sklearn_model(\n model, processed_train_x_df, train_filter_y_df, processed_test_x_df\n )\n test_predict_df = test_filter_df[[\"id\"]]\n test_predict_df.loc[:, f\"predict_{response_col}\"] = test_predict\n test_predict_df.loc[:, \"year_factor\"] = one_year\n test_prediction_result.append(test_predict_df)\n training_rmse = calculate_rmse(train_filter_y_df, train_predict)\n num_unique_test_predict = len(np.unique(test_predict))\n print(\n f\"{one_year} train rmse: {training_rmse}, num unique test prediction: {num_unique_test_predict}\"\n )\n all_test_prediction_result = pd.concat(test_prediction_result)\n return all_test_prediction_result\n\n\ndef heuristic_sample_to_true_test(train_df, col_to_resample='facility_type_parsed'):\n resample_cols_reduce_dict = {\"Multifamily\": 0.7, \"Office\": 0.25}\n sample_reduce_inds_dict = {}\n inds_to_drop = []\n 
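# Heuristic resampling: drop a random 70% of Multifamily and 25% of Office\n    # rows, then duplicate Unit_Building rows, to better mimic the test mix.\n    # (Note: the iloc lookup below assumes train_df has a default RangeIndex.)\n    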
for ftp, frac in resample_cols_reduce_dict.items():\n itd = (\n train_df.loc[train_df[col_to_resample] == ftp]\n .sample(frac=frac).index\n )\n sample_reduce_inds_dict[ftp] = itd\n inds_to_drop += list(itd)\n\n resample_cols_augment_dict = {\"Unit_Building\": 1}\n sample_augment_inds_dict = {}\n inds_to_add = []\n for ftp, frac in resample_cols_augment_dict.items():\n itd = (\n train_df.loc[train_df[col_to_resample] == ftp]\n .sample(frac=frac).index\n )\n sample_augment_inds_dict[ftp] = itd\n inds_to_add += list(itd)\n new_train_df = pd.concat(\n [\n train_df.drop(index=inds_to_drop),\n train_df.iloc[inds_to_add],\n ]\n )\n return new_train_df\n\n\ndef process_loy_train_test_prediction(\n loy_prediction_result_train_dict, loy_prediction_result_test_dict, model_df\n):\n all_year_factor = list(loy_prediction_result_train_dict.keys())\n all_loy_train_predict = []\n all_loy_test_predict = []\n for one_year in all_year_factor:\n left_year_train_df = model_df.query(\n f\"year_factor != {one_year}\")[[\"id\"]]\n left_year_test_df = model_df.query(\n f\"year_factor == {one_year}\")[[\"id\"]]\n one_left_year_train_df = loy_prediction_result_train_dict[one_year]\n one_left_year_test_df = loy_prediction_result_test_dict[one_year]\n left_year_train_df[\"train_prediction\"] = one_left_year_train_df\n left_year_train_df[\"left_year\"] = one_year\n left_year_test_df[\"test_prediction\"] = one_left_year_test_df\n left_year_test_df[\"left_year\"] = one_year\n all_loy_train_predict.append(left_year_train_df)\n all_loy_test_predict.append(left_year_test_df)\n all_loy_train_predict_df = pd.concat(all_loy_train_predict)\n all_loy_test_predict_df = pd.concat(all_loy_test_predict)\n return all_loy_train_predict_df, all_loy_test_predict_df\n","repo_name":"hannahpu/widsdatathon2022","sub_path":"utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":24451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74822359626","text":"class Vertex:\n def __init__(self, idx):\n self.idx = idx\n self.visited = False\n self.hasCycle = None\n self.pre = []\n\nclass Solution:\n # @param {integer} numCourses\n # @param {integer[][]} prerequisites\n # @return {boolean}\n def canFinish(self, numCourses, prerequisites):\n # construct graph using adjacency list\n self.adjList = [None] * numCourses\n for edge in prerequisites:\n idx = edge[0]\n if self.adjList[idx] == None:\n self.adjList[idx] = Vertex(idx)\n self.adjList[idx].pre.append(edge[1]) # add target idx\n \n # find cycle recursively\n for v in self.adjList:\n if self.hasCycle(v):\n return False\n return True\n \n def hasCycle(self, v):\n if v == None:\n return False\n \n if v.visited == True:\n if v.hasCycle == False:\n return False\n else:\n return True\n \n v.visited = True\n \n for n in v.pre:\n if self.hasCycle(self.adjList[n]):\n return True\n\n v.hasCycle = False\n return False","repo_name":"AlgoLINE/algoLINE","sub_path":"13_August_1st/seonghun/canFinish.py","file_name":"canFinish.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72206934985","text":"#!/usr/bin/python\n##########################################################\n# join all part files in a dir created by split.py.\n# This is roughly like a 'cat fromdir/* > tofile' command\n# on unix, but is a bit more portable and configurable,\n# and exports the join operation as a reusable function.\n# Relies on sort order 
of file names: must be same length.\n# Could extend split/join to popup Tkinter file selectors.\n##########################################################\n\nimport os, sys\nimport re\nfrom utils.class_shared_utilites import shared_utilites\n\nreadsize = 1500\nclass class_join_join:\n    @staticmethod\n    def join(fromdir, tofile):\n        shared_utilites.createDirfromFile(tofile)\n        tofile = tofile.replace(\"\\\\\", \"/\")\n        output = open(tofile, encoding=\"utf-8\", mode='w')\n        parts = os.listdir(fromdir)\n        parts.sort()\n        for filename in parts:\n            filepath = os.path.join(fromdir, filename)\n            fileobj = open(filepath, encoding=\"utf-8\", mode='r')\n            while True:\n                filebytes = fileobj.read(readsize)\n                if not filebytes: break\n                output.write(filebytes)\n            fileobj.close()\n            # os.unlink(fromdir+filename)\n        output.close()\n\n    @staticmethod\n    def join_filename(directory, filename):\n        # match part files named '<filename>partNNN'; escape the filename so\n        # regex metacharacters in it cannot distort the match\n        pattern = re.compile(rf\"{re.escape(filename)}part\\d+$\")\n        # get a list of all files in the directory that match the pattern\n        partfiles = [f for f in os.listdir(directory) if pattern.match(f)]\n        # sort the list of files by their numeric part ('part' is 4 characters)\n        partfiles.sort(key=lambda f: int(f[len(filename) + 4:]))\n\n        # create the output file and concatenate the part files into it\n        output_filename = os.path.join(directory, filename)\n        with open(output_filename, 'w', encoding='utf-8') as output:\n            for partfile in partfiles:\n                partfile_path = os.path.join(directory, partfile)\n                with open(partfile_path, 'r', encoding='utf-8') as part:\n                    output.write(part.read())\n                os.remove(partfile_path)\n        # belt-and-braces cleanup in case a part file was not removed above\n        for f in partfiles:\n            if f != filename:\n                try:\n                    os.remove(os.path.join(directory, f))\n                except FileNotFoundError:\n                    pass\n        return output_filename\n
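\n# --- Editor's sketch (added): typical use of the two joiners above; the paths\n# --- and file names are hypothetical examples, not part of the original script.\n#     class_join_join.join('parts_dir', 'restored/output.txt')\n#     class_join_join.join_filename('downloads', 'archive.bin')   # parts: archive.binpartNNN\n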
if __name__ == '__main__':\n    if len(sys.argv) == 2 and sys.argv[1] == '-help':\n        print('Use: class_join.py [from-dir-name to-file-name]')\n    else:\n        if len(sys.argv) != 3:\n            interactive = 1\n            fromdir = input('Directory containing part files? ')\n            tofile = input('Name of file to be recreated? ')\n        else:\n            interactive = 0\n            fromdir, tofile = sys.argv[1:]\n        absfrom, absto = map(os.path.abspath, [fromdir, tofile])\n        print('Joining', absfrom, 'to make', absto)\n\n        try:\n            class_join_join.join(fromdir, tofile)\n        except Exception as exc:\n            print('Error joining files:', exc)\n        else:\n            print('Join complete: see', absto)\n        if interactive: input('Press Enter key') # pause if clicked","repo_name":"spspider/QRfiles","sub_path":"utils/class_join.py","file_name":"class_join.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"41164251725","text":"import re\nimport numpy\nimport math\ntry:\n    # Python 3.x\n    import collections.abc as collections\nexcept ImportError:\n    # Python 2.x\n    import collections\nimport PyQt5.QtCore\n\n# Load dependencies\nimport ovito\nimport ovito.data\n\n# Load the native code module\nimport Particles\n\n# Inject selected classes into parent module.\novito.data.SimulationCell = Particles.SimulationCell\novito.data.ParticleProperty = Particles.ParticleProperty\novito.data.Bonds = Particles.Bonds\novito.data.SurfaceMesh = Particles.SurfaceMesh\novito.data.ParticleTypeProperty = Particles.ParticleTypeProperty\novito.data.ParticleType = Particles.ParticleType\novito.data.BondProperty = Particles.BondProperty\novito.data.BondTypeProperty = Particles.BondTypeProperty\novito.data.BondType = Particles.BondType\n\n# For backward-compatibility with OVITO 2.5.1:\ndef _ParticleProperty_data_attribute_name(self):\n    if self.type != Particles.ParticleProperty.Type.User:\n        return re.sub('\\W|^(?=\\d)','_', self.name).lower()\n    else:\n        return None\nParticles.ParticleProperty._data_attribute_name = property(_ParticleProperty_data_attribute_name)\n\n# Access particle and bond properties by their name (not display title).\ndef _ParticleProperty_data_key(self):\n    return self.name\nParticles.ParticleProperty._data_key = property(_ParticleProperty_data_key)\nParticles.BondProperty._data_key = property(_ParticleProperty_data_key)\n\n# Implement the 'particle_properties' attribute of the DataCollection class.\ndef _DataCollection_particle_properties(self):\n    \"\"\"\n    Returns a dictionary view that provides access to the :py:class:`ParticleProperty` \n    instances stored in this :py:class:`!DataCollection`.\n    \"\"\"\n    \n    # Helper class used to implement the 'particle_properties' property of the DataCollection class.\n    class _ParticlePropertyView(collections.Mapping):\n        \n        def __init__(self, data_collection):\n            self._data_collection = data_collection\n        \n        def __len__(self):\n            property_count = 0\n            for obj in self._data_collection.objects:\n                if isinstance(obj, ovito.data.ParticleProperty): property_count += 1\n            return property_count\n        \n        def __getitem__(self, key):\n            if not isinstance(key, str):\n                raise TypeError(\"Property name key is not a string.\")\n            for obj in self._data_collection.objects:\n                if isinstance(obj, ovito.data.ParticleProperty): \n                    if obj.name == key:\n                        return obj\n            raise KeyError(\"The DataCollection contains no particle property with the name '%s'.\" % key)\n        \n        def __iter__(self):\n            for obj in self._data_collection.objects:\n                if isinstance(obj, ovito.data.ParticleProperty):\n                    yield obj.name\n        \n        def __getattr__(self, name):\n            for obj in self._data_collection.objects:\n                if isinstance(obj, ovito.data.ParticleProperty): \n                    if obj.type != ovito.data.ParticleProperty.Type.User and re.sub('\\W|^(?=\\d)','_', obj.name).lower() == name:\n                        return obj\n            raise 
AttributeError(\"DataCollection does not contain the particle property '%s'.\" % name)\n \n def __repr__(self):\n return repr(dict(self))\n \n return _ParticlePropertyView(self)\novito.data.DataCollection.particle_properties = property(_DataCollection_particle_properties)\n\n# Implement the 'bond_properties' attribute of the DataCollection class.\ndef _DataCollection_bond_properties(self):\n \"\"\"\n Returns a dictionary view that provides access to the :py:class:`BondProperty` \n instances stored in this :py:class:`!DataCollection`.\n \"\"\"\n \n # Helper class used to implement the 'bond_properties' property of the DataCollection class.\n class _BondPropertyView(collections.Mapping):\n \n def __init__(self, data_collection):\n self._data_collection = data_collection\n \n def __len__(self):\n property_count = 0\n for obj in self._data_collection.objects:\n if isinstance(obj, ovito.data.BondProperty): property_count += 1\n return property_count\n \n def __getitem__(self, key):\n if not isinstance(key, str):\n raise TypeError(\"Property name key is not a string.\")\n for obj in self._data_collection.objects:\n if isinstance(obj, ovito.data.BondProperty): \n if obj.name == key:\n return obj\n raise KeyError(\"The DataCollection contains no bond property with the name '%s'.\" % key)\n \n def __iter__(self):\n for obj in self._data_collection.objects:\n if isinstance(obj, ovito.data.BondProperty):\n yield obj.name\n \n def __getattr__(self, name):\n for obj in self._data_collection.objects:\n if isinstance(obj, ovito.data.BondProperty): \n if obj.type != ovito.data.BondProperty.Type.User and re.sub('\\W|^(?=\\d)','_', obj.name).lower() == name:\n return obj\n raise AttributeError(\"DataCollection does not contain the bond property '%s'.\" % name)\n \n return _BondPropertyView(self)\novito.data.DataCollection.bond_properties = property(_DataCollection_bond_properties)\n\n# Implement 'cell' attribute of DataCollection class.\ndef _DataCollection_cell(self):\n \"\"\"\n Returns the :py:class:`SimulationCell` stored in this :py:class:`!DataCollection`.\n \n Accessing this property raises an ``AttributeError`` if the data collection\n contains no simulation cell information.\n \"\"\"\n for obj in self.objects:\n if isinstance(obj, ovito.data.SimulationCell):\n return obj\n raise AttributeError(\"This DataCollection contains no simulation cell.\")\novito.data.DataCollection.cell = property(_DataCollection_cell)\n\n# Implement 'bonds' attribute of DataCollection class.\ndef _DataCollection_bonds(self):\n \"\"\"\n Returns the :py:class:`Bonds` object stored in this :py:class:`!DataCollection`.\n \n Accessing this property raises an ``AttributeError`` if the data collection\n contains no bonds.\n \"\"\"\n for obj in self.objects:\n if isinstance(obj, ovito.data.Bonds):\n return obj\n raise AttributeError(\"This DataCollection contains no bonds data object.\")\novito.data.DataCollection.bonds = property(_DataCollection_bonds)\n\n# Implement 'surface' attribute of DataCollection class.\ndef _DataCollection_surface(self):\n \"\"\"\n Returns the :py:class:`SurfaceMesh` in this :py:class:`!DataCollection`.\n \n Accessing this property raises an ``AttributeError`` if the data collection\n contains no surface mesh instance.\n \"\"\"\n for obj in self.objects:\n if isinstance(obj, ovito.data.SurfaceMesh):\n return obj\n raise AttributeError(\"This DataCollection contains no surface mesh.\")\novito.data.DataCollection.surface = property(_DataCollection_surface)\n\n# Returns a NumPy array wrapper for a particle 
property.\ndef _ParticleProperty_array(self):\n \"\"\" \n This attribute returns a NumPy array, which provides read access to the per-particle data stored in this particle property object.\n \n The returned array is one-dimensional for scalar particle properties (:py:attr:`.components` == 1),\n or two-dimensional for vector properties (:py:attr:`.components` > 1). The outer length of the array is \n equal to the number of particles in both cases.\n \n Note that the returned NumPy array is read-only and provides a view of the internal data. \n No copy of the data, which may be shared by multiple objects, is made. If you want to modify the \n data stored in this particle property, use :py:attr:`.marray` instead.\n \"\"\"\n return numpy.asarray(self)\nParticles.ParticleProperty.array = property(_ParticleProperty_array)\n\n# Returns a NumPy array wrapper for a particle property with write access.\ndef _ParticleProperty_marray(self):\n \"\"\" \n This attribute returns a *mutable* NumPy array providing read/write access to the internal per-particle data.\n \n The returned array is one-dimensional for scalar particle properties (:py:attr:`.components` == 1),\n or two-dimensional for vector properties (:py:attr:`.components` > 1). The outer length of the array is \n equal to the number of particles in both cases.\n \n .. note::\n \n After you are done modifying the data in the returned NumPy array, you must call\n :py:meth:`.changed`! Calling this method is necessary to inform the data pipeline system\n that the input particle data has changed and the modification pipeline needs to be re-evaluated.\n The reason is that OVITO cannot automatically detect modifications made by the script to \n the returned NumPy array. Therefore, an explicit call to :py:meth:`.changed` is necessary. \n \n **Example**\n \n .. literalinclude:: ../example_snippets/mutable_array.py\n \n \"\"\"\n class DummyClass:\n pass\n o = DummyClass()\n o.__array_interface__ = self.__mutable_array_interface__\n # Create reference to particle property object to keep it alive.\n o.__base_property = self \n return numpy.asarray(o)\n\n# This is needed to enable the augmented assignment operators (+=, -=, etc.) for the 'marray' property.\ndef _ParticleProperty_marray_assign(self, other):\n if not hasattr(other, \"__array_interface__\"):\n raise ValueError(\"Only objects supporting the array interface can be assigned to the 'marray' property.\")\n o = other.__array_interface__\n s = self.__mutable_array_interface__\n if o[\"shape\"] != s[\"shape\"] or o[\"typestr\"] != s[\"typestr\"] or o[\"data\"] != s[\"data\"]:\n raise ValueError(\"Assignment to the 'marray' property is restricted. Left and right-hand side must be identical.\")\n # Assume that the data has been changed in the meantime.\n self.changed()\n \nParticles.ParticleProperty.marray = property(_ParticleProperty_marray, _ParticleProperty_marray_assign)\n# For backward compatibility with OVITO 2.5.1:\nParticles.ParticleProperty.mutable_array = property(lambda self: self.marray)\n\n# Returns a NumPy array wrapper for a bond property.\ndef _BondProperty_array(self):\n \"\"\" \n This attribute returns a NumPy array, which provides read access to the per-bond data stored in this bond property object.\n \n The returned array is one-dimensional for scalar bond properties (:py:attr:`.components` == 1),\n or two-dimensional for vector properties (:py:attr:`.components` > 1). 
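\n\n    Editor's note (added): the ``marray`` implementations above share memory through\n    NumPy's array-interface protocol. A standalone toy version of the same trick,\n    with illustrative names only::\n\n        import numpy\n        class Raw:\n            def __init__(self, buf, shape, typestr):\n                self._buf = buf   # keep the buffer alive, like __base_property above\n                self.__array_interface__ = {'shape': shape, 'typestr': typestr,\n                                            'data': buf, 'version': 3}\n        view = numpy.asarray(Raw(bytearray(16), (4,), '<f4'))\n        view[:] = 1.5     # writes land directly in the bytearray\n\n    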
The outer length of the array is \n equal to the number of half-bonds in both cases.\n \n Note that the returned NumPy array is read-only and provides a view of the internal data. \n No copy of the data, which may be shared by multiple objects, is made. If you want to modify the \n data stored in this bond property, use :py:attr:`.marray` instead.\n \"\"\"\n return numpy.asarray(self)\nParticles.BondProperty.array = property(_BondProperty_array)\n\n# Returns a NumPy array wrapper for a bond property with write access.\ndef _BondProperty_marray(self):\n \"\"\" \n This attribute returns a *mutable* NumPy array providing read/write access to the internal per-bond data.\n \n The returned array is one-dimensional for scalar bond properties (:py:attr:`.components` == 1),\n or two-dimensional for vector properties (:py:attr:`.components` > 1). The outer length of the array is \n equal to the number of half-bonds in both cases.\n \n .. note::\n \n After you are done modifying the data in the returned NumPy array, you must call\n :py:meth:`.changed`! Calling this method is necessary to inform the data pipeline system\n that the input bond data has changed and the modification pipeline needs to be re-evaluated.\n The reason is that OVITO cannot automatically detect modifications made by the script to \n the returned NumPy array. Therefore, an explicit call to :py:meth:`.changed` is necessary. \n \n \"\"\"\n class DummyClass:\n pass\n o = DummyClass()\n o.__array_interface__ = self.__mutable_array_interface__\n # Create reference to particle property object to keep it alive.\n o.__base_property = self\n return numpy.asarray(o)\n \nParticles.BondProperty.marray = property(_BondProperty_marray, _ParticleProperty_marray_assign)\n\n# Returns a NumPy array wrapper for bonds list.\ndef _Bonds_array(self):\n \"\"\" This attribute returns a NumPy array providing direct access to the bond list.\n \n The returned array is two-dimensional and contains pairs of particle indices connected by a bond.\n The array's shape is *N x 2*, where *N* is the number of half bonds. Each pair-wise bond occurs twice\n in the array, once for the connection A->B and second time for the connection B->A.\n Particle indices start at 0.\n \n Note that the returned NumPy array is read-only and provides a view of the internal data. \n No copy of the data is made.\n \"\"\"\n return numpy.asarray(self)\nParticles.Bonds.array = property(_Bonds_array)\n\ndef _Bonds_add(self, p1, p2):\n \"\"\" Creates a new half-bond from particle *p1* to particle *p2*. \n \n To also create a half-bond from *p2* to *p1*, use :py:meth:`.add_full` instead.\n\n :param int p1: Zero-based index of the particle at which the bonds originates.\n :param int p2: Zero-based index of the particle the bonds leads to.\n \"\"\"\n self.addBond(p1, p2, (0,0,0))\nParticles.Bonds.add = _Bonds_add\n\ndef _Bonds_add_full(self, p1, p2):\n \"\"\" Creates two half-bonds between the particles *p1* and *p2*.\n \n :param int p1: Zero-based index of the first particle.\n :param int p2: Zero-based index of the second particle.\n \"\"\"\n self.addBond(p1, p2, (0,0,0))\n self.addBond(p2, p1, (0,0,0))\nParticles.Bonds.add_full = _Bonds_add_full\n\n# Implement 'pbc' property of SimulationCell class.\ndef _get_SimulationCell_pbc(self):\n \"\"\" A tuple with three boolean values, which specify periodic boundary flags of the simulation cell along each cell vector. 
\"\"\"\n return (self.pbc_x, self.pbc_y, self.pbc_z)\ndef _set_SimulationCell_pbc(self, flags):\n assert(len(flags) == 3) # Expected tuple with three Boolean flags.\n self.pbc_x = flags[0]\n self.pbc_y = flags[1]\n self.pbc_z = flags[2]\nParticles.SimulationCell.pbc = property(_get_SimulationCell_pbc, _set_SimulationCell_pbc)\n\n# Implement 'matrix' property of SimulationCell class.\ndef _get_SimulationCell_matrix(self):\n \"\"\" A 3x4 matrix containing the three edge vectors of the cell (matrix columns 0 to 2)\n and the cell origin (matrix column 3). \"\"\"\n a = numpy.asarray(self.cellMatrix)\n a.setflags(write = 0) # Mark array data as read-only, because it's only a temporary object.\n return a\ndef _set_SimulationCell_matrix(self, m):\n a = numpy.asarray(m)\n assert(a.shape == (3,4)) # Expected 3x4 matrix array\n self.vector1 = a[:,0]\n self.vector2 = a[:,1]\n self.vector3 = a[:,2]\n self.origin = a[:,3]\nParticles.SimulationCell.matrix = property(_get_SimulationCell_matrix, _set_SimulationCell_matrix)\n\nclass CutoffNeighborFinder(Particles.CutoffNeighborFinder):\n \"\"\" \n A utility class that computes particle neighbor lists.\n \n This class allows to iterate over the neighbors of each particle within a given cutoff distance.\n You can use it to build neighbors lists or perform other kinds of analyses that require neighbor information.\n \n The constructor takes a positive cutoff radius and a :py:class:`DataCollection ` \n containing the input particle positions and the cell geometry (including periodic boundary flags).\n \n Once the :py:class:`!CutoffNeighborFinder` has been constructed, you can call its :py:meth:`.find` method to \n iterate over the neighbors of a specific particle, for example:\n \n .. literalinclude:: ../example_snippets/cutoff_neighbor_finder.py\n \n If you want to determine the *N* nearest neighbors of a particle,\n use the :py:class:`NearestNeighborFinder` class instead. \n \"\"\"\n \n def __init__(self, cutoff, data_collection):\n \"\"\" This is the constructor. \"\"\"\n super(self.__class__, self).__init__() \n if not hasattr(data_collection, 'position'):\n raise KeyError(\"Data collection does not contain particle positions.\")\n if not hasattr(data_collection, 'cell'):\n raise KeyError(\"Data collection does not contain simulation cell information.\")\n self.particle_count = data_collection.number_of_particles\n self.prepare(cutoff, data_collection.position, data_collection.cell)\n \n def find(self, index):\n \"\"\" \n Returns an iterator over all neighbors of the given particle.\n \n :param int index: The index of the central particle whose neighbors should be iterated. Particle indices start at 0.\n :returns: A Python iterator that visits all neighbors of the central particle within the cutoff distance. \n For each neighbor the iterator returns an object with the following attributes:\n \n * **index**: The index of the current neighbor particle (starting at 0).\n * **distance**: The distance of the current neighbor from the central particle.\n * **distance_squared**: The squared neighbor distance.\n * **delta**: The three-dimensional vector connecting the central particle with the current neighbor (taking into account periodicity).\n * **pbc_shift**: The periodic shift vector, which specifies how often each periodic boundary of the simulation cell is crossed when going from the central particle to the current neighbor.\n \n Note that all periodic images of particles within the cutoff radius are visited. 
Thus, the same particle index may appear multiple times in the neighbor\n list of a central particle. In fact, the central particle may be among its own neighbors in a sufficiently small periodic simulation cell.\n However, the computed vector (``delta``) and PBC shift (``pbc_shift``) will be unique for each visited image of a neighboring particle.\n \"\"\"\n if index < 0 or index >= self.particle_count:\n raise IndexError(\"Particle index is out of range.\")\n # Construct the C++ neighbor query. \n query = Particles.CutoffNeighborFinder.Query(self, index)\n # Iterate over neighbors.\n while not query.atEnd:\n yield query\n query.next()\n \novito.data.CutoffNeighborFinder = CutoffNeighborFinder\n\nclass NearestNeighborFinder(Particles.NearestNeighborFinder):\n \"\"\" \n A utility class that finds the *N* nearest neighbors of a particle.\n \n The constructor takes the number of requested nearest neighbors, *N*, and a :py:class:`DataCollection ` \n containing the input particle positions and the cell geometry (including periodic boundary flags).\n *N* must be a positive integer not greater than 30 (which is the built-in maximum supported by this class).\n \n Once the :py:class:`!NearestNeighborFinder` has been constructed, you can call its :py:meth:`.find` method to \n iterate over the sorted list of nearest neighbors of a specific particle, for example:\n \n .. literalinclude:: ../example_snippets/nearest_neighbor_finder.py\n \n If you want to iterate over all neighbors within a certain cutoff radius of a central particle,\n use the :py:class:`CutoffNeighborFinder` class instead.\n \"\"\"\n \n def __init__(self, N, data_collection):\n \"\"\" This is the constructor. \"\"\"\n super(self.__class__, self).__init__(N)\n if N<=0 or N>30:\n raise ValueError(\"The requested number of nearest neighbors is out of range.\")\n if not hasattr(data_collection, 'position'):\n raise KeyError(\"Data collection does not contain particle positions.\")\n if not hasattr(data_collection, 'cell'):\n raise KeyError(\"Data collection does not contain simulation cell information.\")\n self.particle_count = data_collection.number_of_particles\n self.prepare(data_collection.position, data_collection.cell)\n \n def find(self, index):\n \"\"\" \n Returns an iterator that visits the *N* nearest neighbors of the given particle in order of ascending distance.\n \n :param int index: The index of the central particle whose neighbors should be iterated. Particle indices start at 0.\n :returns: A Python iterator that visits the *N* nearest neighbors of the central particle in order of ascending distance. \n For each visited neighbor the iterator returns an object with the following attributes:\n \n * **index**: The index of the current neighbor particle (starting at 0).\n * **distance**: The distance of the current neighbor from the central particle.\n * **distance_squared**: The squared neighbor distance.\n * **delta**: The three-dimensional vector connecting the central particle with the current neighbor (taking into account periodicity).\n \n Note that several periodic images of the same particle may be visited. Thus, the same particle index may appear multiple times in the neighbor\n list of a central particle. 
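\n\n        Editor's sketch (added; same assumptions about ``data`` as above)::\n\n            finder = NearestNeighborFinder(12, data)\n            for neigh in finder.find(0):\n                print(neigh.index, neigh.distance, neigh.delta)\n\n        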
In fact, the central particle may be among its own neighbors in a sufficiently small periodic simulation cell.\n However, the computed neighbor vector (``delta``) will be unique for each visited image of a neighboring particle.\n \n The number of neighbors actually visited may be smaller than the requested number, *N*, if the\n system contains too few particles and no periodic boundary conditions are used.\n \"\"\"\n if index < 0 or index >= self.particle_count:\n raise IndexError(\"Particle index is out of range.\")\n # Construct the C++ neighbor query. \n query = Particles.NearestNeighborFinder.Query(self)\n query.findNeighbors(index)\n # Iterate over neighbors.\n for i in range(query.count):\n yield query[i]\n \novito.data.NearestNeighborFinder = NearestNeighborFinder\n\ndef _ParticleProperty_create(prop_type, num_particles):\n \"\"\"\n Static factory function that creates a new :py:class:`!ParticleProperty` instance for a standard particle property.\n To create a new user-defined property, use :py:meth:`.create_user` instead. \n \n Note that this factory function is a low-level method. If you want to add a new \n particle property to an existing :py:class:`~ovito.data.DataCollection`, you can do so using the high-level method \n :py:meth:`~ovito.data.DataCollection.create_particle_property` instead.\n \n :param ParticleProperty.Type prop_type: The standard particle property to create. See the :py:attr:`.type` attribute for a list of possible values.\n :param int num_particles: The number of particles. This determines the size of the allocated data array.\n :returns: A newly created instance of the :py:class:`!ParticleProperty` class or one of its sub-classes. \n \n \"\"\"\n assert(isinstance(prop_type, ovito.data.ParticleProperty.Type))\n assert(prop_type != ovito.data.ParticleProperty.Type.User)\n assert(num_particles >= 0)\n \n return ovito.data.ParticleProperty.createStandardProperty(ovito.dataset, num_particles, prop_type, 0, True)\novito.data.ParticleProperty.create = staticmethod(_ParticleProperty_create)\n\ndef _ParticleProperty_create_user(name, data_type, num_particles, num_components = 1):\n \"\"\"\n Static factory function that creates a new :py:class:`!ParticleProperty` instance for a user-defined particle property.\n To create one of the standard properties, use :py:meth:`.create` instead. \n \n Note that this factory function is a low-level method. If you want to add a new user-defined \n particle property to an existing :py:class:`~ovito.data.DataCollection`, you can do so using the high-level method \n :py:meth:`~ovito.data.DataCollection.create_user_particle_property` instead.\n\n :param str name: The name of the user-defined particle property to create.\n :param str data_type: Must be either ``\"int\"`` or ``\"float\"``. \n :param int num_particles: The number of particles. This determines the size of the allocated data array.\n :param int num_components: The number of components when creating a vector property.\n :returns: A newly created instance of the :py:class:`!ParticleProperty` class. \n \n \"\"\"\n assert(num_particles >= 0)\n assert(num_components >= 1)\n if data_type == \"int\":\n data_type = PyQt5.QtCore.QMetaType.type(\"int\")\n elif data_type == \"float\":\n data_type = PyQt5.QtCore.QMetaType.type(\"FloatType\")\n else:\n raise RuntimeError(\"Invalid data type. 
Only 'int' or 'float' are allowed.\")\n \n return ovito.data.ParticleProperty.createUserProperty(ovito.dataset, num_particles, data_type, num_components, 0, name, True)\novito.data.ParticleProperty.create_user = staticmethod(_ParticleProperty_create_user)\n\n# Extend DataCollection class by adding the 'create_particle_property()' and 'create_user_particle_property()' methods.\ndef _DataCollection_create_particle_property(self, property_type, data = None):\n \"\"\" \n Adds a standard particle property to this data collection.\n \n If the specified particle property already exists in this data collection, the existing property instance is returned.\n Otherwise the method creates a new property instance using :py:meth:`ParticleProperty.create` and adds it to this data collection.\n \n The optional parameter *data* allows to directly set or initialize the values of the particle property.\n \n :param ParticleProperty.Type property_type: The standard particle property to create. See the :py:attr:`ParticleProperty.type` attribute for a list of possible values.\n :param data: An optional data array (e.g. NumPy array), which contains the per-particle values used to initialize the particle property.\n The size of the array must match the number of particles in this data collection (see :py:attr:`.number_of_particles` attribute). \n :returns: A newly created instance of the :py:class:`ovito.data.ParticleProperty` class or one of its sub-classes if the property did not exist yet in the data collection.\n Otherwise, the existing particle property object is returned.\n \n \"\"\"\n # Check if property already exists in the data collection.\n prop = None\n position_prop = None\n for obj in self.values():\n if isinstance(obj, ovito.data.ParticleProperty):\n if obj.type == property_type:\n prop = obj\n if obj.type == ovito.data.ParticleProperty.Type.Position:\n position_prop = obj\n\n # First we have to determine the number of particles. This requires the 'Position' particle property\n if position_prop is None:\n raise RuntimeError(\"Cannot add new particle property to data collection, because data collection contains no particles.\")\n num_particles = position_prop.size\n \n if prop is None:\n # If property does not exists yet, create a new ParticleProperty instance.\n prop = ovito.data.ParticleProperty.create(property_type, num_particles)\n self.add(prop)\n else:\n # Otherwise, make sure the existing property is a fresh copy so we can safely modify it.\n prop = self.copy_if_needed(prop)\n \n # Initialize property with per-particle data if provided.\n if data is not None:\n prop.marray[:] = data\n prop.changed()\n \n return prop\novito.data.DataCollection.create_particle_property = _DataCollection_create_particle_property\n\ndef _DataCollection_create_user_particle_property(self, name, data_type, num_components=1, data = None):\n \"\"\" \n Adds a user-defined particle property to this data collection.\n \n If a particle property with the given name already exists in this data collection, the existing property instance is returned.\n Otherwise the method creates a new property instance using :py:meth:`ParticleProperty.create_user` and adds it to this data collection.\n \n The optional parameter *data* allows to directly set or initialize the values of the particle property.\n \n :param str name: The name of the user-defined particle property to create.\n :param str data_type: Must be either ``\"int\"`` or ``\"float\"``. 
\n :param int num_components: The number of components when creating a vector property.\n :param data: An optional data array (e.g. NumPy array), which contains the per-particle values used to initialize the particle property.\n The size of the array must match the number of particles in this data collection (see :py:attr:`.number_of_particles` attribute). \n :returns: A newly created instance of the :py:class:`~ovito.data.ParticleProperty` class or one of its sub-classes if the property did not exist yet in the data collection.\n Otherwise, the existing particle property object is returned.\n \n \"\"\"\n # Check if property already exists in the data collection.\n prop = None\n position_prop = None\n for obj in self.values():\n if isinstance(obj, ovito.data.ParticleProperty):\n if obj.name == name:\n prop = obj\n if obj.type == ovito.data.ParticleProperty.Type.Position:\n position_prop = obj\n\n # First we have to determine the number of particles. This requires the 'Position' particle property\n if position_prop is None:\n raise RuntimeError(\"Cannot add new particle property to data collection, because data collection contains no particles.\")\n num_particles = position_prop.size\n \n if prop is None:\n # If property does not exists yet, create a new ParticleProperty instance.\n prop = ovito.data.ParticleProperty.create_user(name, data_type, num_particles, num_components)\n self.add(prop)\n else:\n # Otherwise, make sure the existing property is a fresh copy so we can safely modify it.\n prop = self.copy_if_needed(prop)\n \n # Initialize property with per-particle data if provided.\n if data is not None:\n prop.marray[:] = data\n prop.changed()\n \n return prop\novito.data.DataCollection.create_user_particle_property = _DataCollection_create_user_particle_property\n\n# Extend the DataCollection class with a 'number_of_particles' property.\ndef _get_DataCollection_number_of_particles(self):\n \"\"\" The number of particles stored in the data collection. \"\"\"\n # The number of particles is determined by the size of the 'Position' particle property.\n for obj in self.objects:\n if isinstance(obj, ovito.data.ParticleProperty) and obj.type == ovito.data.ParticleProperty.Type.Position:\n return obj.size\n return 0\novito.data.DataCollection.number_of_particles = property(_get_DataCollection_number_of_particles)\n\n# Extend the DataCollection class with a 'number_of_bonds' property.\ndef _get_DataCollection_number_of_bonds(self):\n \"\"\" The number of half-bonds stored in the data collection. \"\"\"\n # The number of bonds is determined by the size of the BondsObject.\n try:\n return self.bonds.size\n except:\n return 0\novito.data.DataCollection.number_of_bonds = property(_get_DataCollection_number_of_bonds)\n\n# Implement the 'type_list' property of the ParticleTypeProperty class, which provides access to particle types. 
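\n\n# [Editor's sketch] Using the two high-level factory methods above; 'data' is an\n# existing DataCollection and the property names/values are illustrative:\n#     import numpy\n#     n = data.number_of_particles\n#     data.create_particle_property(ParticleProperty.Type.Color,\n#                                   data=numpy.ones((n, 3)))\n#     data.create_user_particle_property('Charge', 'float', data=numpy.zeros(n))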
\ndef _get_ParticleTypeProperty_type_list(self):\n \"\"\"A mutable list of :py:class:`ParticleType` instances.\"\"\" \n class ParticleTypeList(collections.MutableSequence):\n def __init__(self, owner):\n self.__owner = owner;\n def __len__(self):\n return len(self.__owner.particleTypes)\n def __getitem__(self, index):\n if index < 0: index += len(self)\n return self.__owner.particleTypes[index]\n def __delitem__(self, index):\n if index < 0: index += len(self)\n self.__owner.removeParticleType(index)\n def __setitem__(self, index, obj):\n if index < 0: index += len(self)\n self.__owner.removeParticleType(index)\n self.__owner.insertParticleType(index, obj)\n def insert(self, index, obj):\n if index < 0: index += len(self)\n self.__owner.insertParticleType(index, obj)\n return ParticleTypeList(self)\novito.data.ParticleTypeProperty.type_list = property(_get_ParticleTypeProperty_type_list)\n\n# Implement the 'type_list' property of the BondTypeProperty class, which provides access to bond types. \ndef _get_BondTypeProperty_type_list(self):\n \"\"\"A mutable list of :py:class:`BondType` instances.\"\"\" \n class BondTypeList(collections.MutableSequence):\n def __init__(self, owner):\n self.__owner = owner;\n def __len__(self):\n return len(self.__owner.bondTypes)\n def __getitem__(self, index):\n if index < 0: index += len(self)\n return self.__owner.bondTypes[index]\n def __delitem__(self, index):\n if index < 0: index += len(self)\n self.__owner.removeBondType(index)\n def __setitem__(self, index, obj):\n if index < 0: index += len(self)\n self.__owner.removeBondType(index)\n self.__owner.insertBondType(index, obj)\n def insert(self, index, obj):\n if index < 0: index += len(self)\n self.__owner.insertBondType(index, obj)\n return BondTypeList(self)\novito.data.BondTypeProperty.type_list = property(_get_BondTypeProperty_type_list)\n\nclass Enumerator(Particles.Bonds.ParticleBondMap):\n \"\"\"\n Utility class that allows to efficiently iterate over the bonds that are adjacent to a particular particle.\n\n The class constructor takes the :py:class:`Bonds` object for which it will first build a lookup table.\n After the :py:class:`!Enumerator` has been constructed, the half-bonds of a\n particular particle can be found using the :py:meth:`.bonds_of_particle` method.\n\n Warning: Do not modify the underlying :py:class:`Bonds` object while using the :py:class:`!Enumerator`.\n Adding or deleting bonds would render the internal lookup table of the :py:class:`!Enumerator`\n invalid.\n \"\"\" \n \n def __init__(self, bonds):\n \"\"\" This is the constructor. 
\"\"\"\n super(self.__class__, self).__init__(bonds) \n \n def bonds_of_particle(self, particle_index):\n \"\"\"\n Returns an iterator that yields the indices of the half-bonds connected to the given particle.\n \"\"\"\n eol = self.endOfListValue\n currentBondIndex = self.firstBondOfParticle(particle_index)\n while currentBondIndex != eol:\n yield currentBondIndex\n currentBondIndex = self.nextBondOfParticle(currentBondIndex)\novito.data.Bonds.Enumerator = Enumerator\ndel Enumerator\n","repo_name":"aajarven/matfys-kesakoulu","sub_path":"ovito-2.6.2-x86_64/lib/ovito/plugins/python/ovito/data/particles/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":35752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28721113434","text":"import numpy as np\nimport pylab as plt\nimport sys\n\n\ndef read_data(fname):\n d = np.loadtxt(fname)\n x, y, z = (int(i) for i in d[:3])\n im = d[3:]\n im.resize((x,y,z))\n return im.T\n\n\nif __name__ == '__main__':\n fname = sys.argv[1]\n data = read_data(fname)\n\n z0, y0, x0 = [int(np.floor(i/2)) for i in data.shape]\n\n fig = plt.figure()\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n\n ax1.imshow(data[z0,:,:], interpolation='nearest')\n ax1.set_title('XY')\n\n ax2.imshow(data[:,y0,:], interpolation='nearest')\n ax2.set_title('ZX')\n\n plt.show()\n","repo_name":"mpascucci/pycroscopy3D","sub_path":"deconvolve/python/tests/simple_plot.py","file_name":"simple_plot.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"31219609474","text":"import logging\nfrom flask_sqlalchemy import SQLAlchemy\nimport flask\n\n# Create the SQLAlchemy object to be initialized later in init_db()\ndb = SQLAlchemy()\n\nclass DataValidationError(Exception):\n \"\"\" Used for an data validation errors when deserializing \"\"\"\n pass\n\nclass Product(db.Model):\n \"\"\"\n Class that represents a Product\n \"\"\"\n\n logger = logging.getLogger('app')\n app = None\n\n # Table Schema\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50))\n stock = db.Column(db.Integer)\n price = db.Column(db.Numeric(18,2))\n description = db.Column(db.String(255))\n category = db.Column(db.String(50))\n\n def save(self):\n \"\"\"\n Saves a Product to the data store\n \"\"\"\n Product.logger.info('Saving %s', self.name)\n if not self.id:\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n Product.logger.info(\"Deleting %s\", self.name)\n db.session.delete(self)\n db.session.commit()\n\n @classmethod\n def delete_all(cls):\n Product.logger.info(\"Deleting all products\")\n db.session.query(cls).delete()\n db.session.commit()\n\n def serialize(self):\n \"\"\" Serializes a Product into a dictionary \"\"\"\n return {\"id\": self.id,\n \"name\": self.name,\n \"stock\": self.stock,\n \"price\": float(self.price),\n \"description\": self.description,\n \"category\": self.category}\n\n def deserialize(self, data):\n \"\"\"\n Deserializes a Product from a dictionary\n Args:\n data (dict): A dictionary containing the Product data\n \"\"\"\n try:\n if data['name'] == '' or data['category'] == '' :\n raise DataValidationError('Field cannot be empty string')\n self.name = data['name']\n self.stock = data['stock']\n self.price = data['price']\n self.description = data['description']\n self.category = data['category']\n except KeyError as error:\n raise DataValidationError(\n 'Invalid 
product: missing ' + error.args[0])\n        except TypeError as error:\n            raise DataValidationError('Invalid product: body of request contained '\n                                      'bad or no data')\n        return self\n\n    @classmethod\n    def init_db(cls, app):\n        \"\"\" Initializes the database session \"\"\"\n        cls.logger.info('Initializing database')\n        cls.app = app\n        # This is where we initialize SQLAlchemy from the Flask app\n        db.init_app(app)\n        if not flask.has_request_context():\n            app.app_context().push()\n        with app.app_context():\n            db.create_all() # make our sqlalchemy tables\n\n    @classmethod\n    def find(cls, product_id):\n        \"\"\" Finds a Product by its ID \"\"\"\n        cls.logger.info('Processing lookup for id %s ...', product_id)\n        return cls.query.get(product_id)\n\n    @classmethod\n    def find_by_category(cls, category):\n        cls.logger.info('Processing category query for %s ...', category)\n        return cls.query.filter(cls.category == category)\n\n    @classmethod\n    def find_by_name(cls, name):\n        cls.logger.info('Processing name query for %s ...', name)\n        return cls.query.filter(cls.name == name)\n\n    @classmethod\n    def find_by_price(cls, low, high):\n        cls.logger.info('Processing price query as range (%d %d] ...', low, high)\n        return cls.query.filter(db.and_(cls.price > low, cls.price <= high))\n\n    @classmethod\n    def all(cls):\n        cls.logger.info('Processing all Products')\n        return cls.query.all()\n","repo_name":"Products-Squad/products","sub_path":"service/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"7486721407","text":"#!/usr/bin/env python\n\nimport sys\nimport rospy\nimport cv_bridge\nimport cv2\nimport numpy\nfrom geometry_msgs.msg import Point\nfrom sensor_msgs.msg import Image, Range\nfrom move_arm.srv import *\n\n#print('vision publisher')\n\nclass coordinate(object):\n    def __init__(self):\n        self.x = 0.0\n        self.y = 0.0\n        self.z = 0.0\n\ndef call_find_cups():\n    print('call find_cups')\n    request = targetRequest()\n    request.data = 0\n    rospy.wait_for_service('find_cups')\n    get_positions = rospy.ServiceProxy('find_cups', target)\n    \n    while True:\n        print('attempting...')\n        try:\n            positions = get_positions.call(request)\n            print(positions)\n            return\n        except rospy.ServiceException as exc:\n            print(\"Service did not process request:\", exc)\n\ndef callback(data):\n    return\n\nif __name__ == '__main__':\n    call_find_cups()\n\"\"\"\n    try:\n        coord = coordinate()\n        rospy.init_node('detect_tags')\n\"\"\"\n","repo_name":"edchang23/bartender_robot","sub_path":"src/move_arm/src/vision_publish.py","file_name":"vision_publish.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
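The Products record above, exercised end to end (editor's sketch; the field values are hypothetical and a Flask app context is assumed):

    p = Product()
    p.deserialize({'name': 'Mug', 'stock': 3, 'price': 9.99,
                   'description': 'Ceramic mug', 'category': 'kitchen'})
    p.save()
    assert Product.find_by_name('Mug').count() >= 1

{"seq_id":"13993581331","text":"__all__ = [\"train\"]\n\nimport tempfile\nfrom collections.abc import Mapping\nfrom pathlib import Path\nfrom warnings import warn\n\nimport dgl\nimport torch\nimport torch.nn as nn\n\n# pyre-ignore[21]:\nimport wandb\nfrom dgl.dataloading import DataLoader\nfrom torch import Tensor\nfrom tqdm import tqdm\n\nfrom .. 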
import Graph\nfrom ..models import EGI\nfrom ..samplers import KHopTriangleSampler\n\n\ndef train(\n graph: Graph,\n features: Tensor,\n device: torch.device,\n config: Mapping,\n sampler_type: str,\n) -> \"Model\":\n dgl_graph: dgl.DGLGraph = graph.as_dgl_graph(device)\n\n # setup temporary directory for saving models for early-stopping\n temporary_directory = tempfile.TemporaryDirectory()\n early_stopping_filepath = Path(temporary_directory.name, \"stopping.pt\")\n\n if sampler_type == \"egi\":\n sampler: dgl.dataloading.Sampler = dgl.dataloading.NeighborSampler(\n [10 for i in range(config[\"k\"])]\n )\n\n elif sampler_type == \"triangle\":\n if not graph.has_mined_triangles():\n warn(\"Input graph contains no mined triangles - mining now\")\n graph.mine_triangles()\n triangles = graph.get_triangles_dictionary()\n sampler: dgl.dataloading.Sampler = KHopTriangleSampler(\n dgl_graph, [10 for i in range(config[\"k\"])], triangles\n )\n\n # pyre-ignore[16]:\n features = features.to(device)\n dgl_graph = dgl_graph.to(device)\n\n in_feats = features.shape[1]\n\n # in the original code, they set number of layers to equal k +1\n n_layers = config[\"k\"] + 1\n\n model = EGI(\n in_feats,\n config[\"hidden_layers\"],\n n_layers,\n nn.PReLU(config[\"hidden_layers\"]),\n )\n\n model = model.to(device)\n\n wandb.watch(model)\n\n if config[\"load_weights_from\"] is not None:\n model.load_state_dict(torch.load(config[\"load_weights_from\"]), strict=False)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=config[\"lr\"], weight_decay=0.0)\n\n # some summary statistics\n best = 1e9\n best_epoch = -1\n\n # shuffle nodes before putting into train and validation sets\n indexes = torch.randperm(dgl_graph.nodes().shape[0])\n validation_size = dgl_graph.nodes().shape[0] // 6\n val_nodes = torch.split(dgl_graph.nodes()[indexes], validation_size)[0]\n train_nodes = torch.unique(torch.cat([val_nodes, dgl_graph.nodes()]))\n\n training_dataloader = DataLoader(\n dgl_graph,\n train_nodes,\n sampler,\n batch_size=config[\"batch_size\"],\n shuffle=True,\n device=device,\n )\n\n for epoch in tqdm(range(config[\"n_epochs\"])):\n log = dict()\n\n model.train()\n\n loss = 0.0\n\n model.train()\n optimizer.zero_grad()\n\n # the sampler returns a list of blocks and involved nodes\n # each block holds a set of edges from a source to destination\n # each block is a hop in the graph\n for blocks in training_dataloader:\n batch_loss = model(dgl_graph, features, blocks)\n batch_loss.backward()\n optimizer.step()\n loss += batch_loss.detach()\n\n log.update({f\"{config['wandb_summary_prefix']}-training-loss\": loss})\n\n del batch_loss, loss, blocks\n\n # VALIDATION\n\n model.eval()\n with torch.no_grad():\n loss = 0.0\n blocks = sampler.sample(dgl_graph, val_nodes)\n loss = model(dgl_graph, features, blocks)\n\n log.update(\n {f\"{config['wandb_summary_prefix']}-validation-loss\": loss.detach()}\n )\n\n wandb.log(log)\n\n # early stopping\n if loss <= best + config[\"min_delta\"]:\n best = loss\n best_epoch = epoch\n # save current weights\n torch.save(model.state_dict(), early_stopping_filepath)\n\n del loss, blocks\n\n if epoch - best_epoch > config[\"patience\"]:\n print(\"Early stopping!\")\n model.load_state_dict(torch.load(early_stopping_filepath))\n\n wandb.summary[\n f\"{config['wandb_summary_prefix']}-early-stopping-epoch\"\n ] = best_epoch\n\n break\n\n if config[\"save_weights_to\"] is not None:\n print(f\"Saving model parameters to {config['save_weights_to']}\")\n\n torch.save(model.state_dict(), 
config[\"save_weights_to\"])\n\n model.eval()\n model.encoder.eval()\n\n temporary_directory.cleanup()\n\n return model.encoder\n","repo_name":"niklasdewally/graph-transfer-learning","sub_path":"src/gtl/training/_egi.py","file_name":"_egi.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69992235145","text":"# installed libraries\nfrom names import get_full_name, get_first_name, get_last_name\nfrom brule import full_name, first_name, last_name\nfrom numpy.random import normal, rand\n\n# part of the Standard Library\nfrom datetime import datetime, timedelta, time, date\nfrom random import choice, randint\nfrom uuid import uuid4\n\n\n#------------------------#\n# #\n# Abstracted Classes #\n# #\n#------------------------#\n\nclass _Numeric():\n \"\"\"\n Abstract class extended by UniformDist and NormalDist\n \"\"\"\n \n def __init__(self, precision=0):\n self.precision = precision\n\n\n#-------------------#\n# #\n# Numeric Types #\n# #\n#-------------------#\n\nclass UniformDist(_Numeric):\n \"\"\"\n A field type to be used in the \"fields\" configuration\n\n This field creates a series of numbers with a uniform distribution\n \"\"\"\n\n # TODO there's an issue where a uniform\n # distribution isn't always created, but rather\n # the first group is low and the last is high\n\n def __init__(self, low, high, precision=None):\n \"\"\"\n Create instance of UniformDist field type\n\n Required params:\n low: Inclusive lower bound of the uniform distribution\n high: Inclusive upper bound of the uniform distribution\n \n Optional params:\n precision: Number of decimal places. Defaults to None, which\n results in an integer\n \"\"\"\n \n super().__init__(precision)\n self.low = low\n self.high = high\n\n def _to_series(self, n):\n \"\"\"\n Return a list of length `n` in accordance with this field type\n \"\"\"\n\n return [\n round((rand() * (self.high - self.low) + self.low), self.precision)\n for _ in range(n)\n ]\n\n\nclass NormalDist(_Numeric):\n \"\"\"\n A field type to be used in the \"fields\" configuration\n\n This field creates a series of numbers with a normal distribution\n \"\"\"\n\n def __init__(self, mean, sd, bounds=None, precision=None):\n \"\"\"\n Create instance of NormalDist field type\n\n Required params:\n mean: Mean of the normal distribution\n sd: Standard Deviation of the normal distribution\n \n Optional params:\n bounds: Tuple containing the lower and upper bounds of the distribution.\n Defaults to None\n precision: Number of decimal places. 
{"seq_id":"69992235145","text":"# installed libraries\nfrom names import get_full_name, get_first_name, get_last_name\nfrom brule import full_name, first_name, last_name\nfrom numpy.random import normal, rand\n\n# part of the Standard Library\nfrom datetime import datetime, timedelta, time, date\nfrom random import choice, randint\nfrom re import split as re_split  # needed by the Date/DateTime/Time parsers below\nfrom uuid import uuid4\n\n\n#------------------------#\n#                        #\n#   Abstracted Classes   #\n#                        #\n#------------------------#\n\nclass _Numeric():\n    \"\"\"\n    Abstract class extended by UniformDist and NormalDist\n    \"\"\"\n    \n    def __init__(self, precision=0):\n        self.precision = precision\n\n\n#-------------------#\n#                   #\n#   Numeric Types   #\n#                   #\n#-------------------#\n\nclass UniformDist(_Numeric):\n    \"\"\"\n    A field type to be used in the \"fields\" configuration\n\n    This field creates a series of numbers with a uniform distribution\n    \"\"\"\n\n    # TODO there's an issue where a uniform\n    # distribution isn't always created, but rather\n    # the first group is low and the last is high\n\n    def __init__(self, low, high, precision=None):\n        \"\"\"\n        Create instance of UniformDist field type\n\n        Required params:\n            low: Inclusive lower bound of the uniform distribution\n            high: Inclusive upper bound of the uniform distribution\n        \n        Optional params:\n            precision: Number of decimal places. Defaults to None, which\n                results in an integer\n        \"\"\"\n        \n        super().__init__(precision)\n        self.low = low\n        self.high = high\n\n    def _to_series(self, n):\n        \"\"\"\n        Return a list of length `n` in accordance with this field type\n        \"\"\"\n\n        return [\n            round((rand() * (self.high - self.low) + self.low), self.precision)\n            for _ in range(n)\n        ]\n\n\nclass NormalDist(_Numeric):\n    \"\"\"\n    A field type to be used in the \"fields\" configuration\n\n    This field creates a series of numbers with a normal distribution\n    \"\"\"\n\n    def __init__(self, mean, sd, bounds=None, precision=None):\n        \"\"\"\n        Create instance of NormalDist field type\n\n        Required params:\n            mean: Mean of the normal distribution\n            sd: Standard Deviation of the normal distribution\n        \n        Optional params:\n            bounds: Tuple containing the lower and upper bounds of the distribution.\n                Defaults to None\n            precision: Number of decimal places. Defaults to None, which\n                results in an integer\n        \"\"\"\n\n        super().__init__(precision)\n        self.mean = mean\n        self.sd = sd\n        self.bounds = bounds\n\n    def _to_series(self, n):\n        \"\"\"\n        Return a list of length `n` in accordance with this field type\n        \"\"\"\n\n        if self.bounds:\n            low, high = self.bounds\n            out_of_bound = low-1\n\n            series = []\n\n            for i in range(n):\n                num = out_of_bound\n                while num < low or num > high:\n                    num = normal(self.mean, self.sd)\n                series.append(round(num, self.precision))\n\n            return series\n\n        else:\n            return [round(normal(self.mean, self.sd), self.precision) for _ in range(n)]\n\n\n#----------------#\n#                #\n#   Text Types   #\n#                #\n#----------------#\n\nclass Name():\n    \"\"\"\n    A field type to be used in the \"fields\" configuration\n\n    This field creates a series of names\n    \"\"\"\n    \n    def __init__(self, first_only=False, last_only=False, brule=False, depends_on=None):\n        \"\"\"\n        Create instance of Name field type\n\n        Optional params:\n            first_only: Boolean indicating whether or not to return just a first name.\n                Defaults to False\n            last_only: Boolean indicating whether or not to return just a last name.\n                Defaults to False\n            brule: Boolean indicating whether or not to return a \"Brule-ized\" name.\n                Defaults to False\n            depends_on: Column name of a gender field (if applicable), so that random\n                names match up with random genders\n        \"\"\"\n        \n        self.first_only = first_only\n        self.last_only = last_only\n        self.brule = brule\n        self.depends_on = depends_on\n\n\n    def _to_series(self, n, dep_series=None):\n        \"\"\"\n        Return a list of length `n` in accordance with this field type\n        \"\"\"\n\n        # choose which functions to use\n        if self.brule: \n            F = {'full': full_name, 'first': first_name, 'last': last_name}\n        else:\n            F = {'full': get_full_name, 'first': get_first_name, 'last': get_last_name}\n\n        if self.depends_on:\n            dep_series = dep_series.apply(lambda x: 'male' if x.lower()[0] == 'm' else 'female')\n\n            if self.first_only:\n                return [F['first'](gender=dep_series[i]) for i in range(n)]\n            if self.last_only:\n                return [F['last']() for _ in range(n)]\n            \n            # else\n            return [F['full'](gender=dep_series[i]) for i in range(n)] \n\n        else:\n            if self.first_only:\n                return [F['first']() for _ in range(n)]\n            if self.last_only:\n                return [F['last']() for _ in range(n)]\n            \n            # else\n            return [F['full']() for _ in range(n)] \n\n\nclass Group():\n    \"\"\"\n    A field type to be used in the \"fields\" configuration\n\n    This field creates a series of different groups / classes\n    \"\"\"\n\n    def __init__(self, groups):\n        \"\"\"\n        Create instance of Group field type\n\n        Required params:\n            groups: List of groups (with the option of providing discrete probabilities)\n        \"\"\"\n\n        self.groups = groups\n\n    def _to_series(self, n):\n        \"\"\"\n        Return a list of length `n` in accordance with this field type\n        \"\"\"\n        \n        # check if custom probabilities were provided\n        if isinstance(self.groups[0], (list, tuple)):\n            \n            if sum([x[1] for x in self.groups]) != 1:\n                raise ValueError(\"Probabilities don't add to 1\")\n\n            series = []\n            for _ in range(n):\n                r = rand()\n                for label, prob in self.groups:\n                    r -= prob\n                    if r <= 0:\n                        series.append(label)\n                        break\n            return series\n\n        # if no probabilities were given, use a uniform dist\n        else:\n            return [choice(self.groups) for _ in range(n)]\n\n\nclass Custom():\n    \"\"\"\n    A field type to be used in the \"fields\" configuration\n\n    This field creates a series based on a custom function\n    \"\"\"\n\n    def __init__(self, base, func):\n        \"\"\"\n        Create instance of Custom field type\n\n        
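[Editor's sketch] Building a few columns with the numeric and text field types\n        defined above (all values are arbitrary examples)::\n\n            ages   = NormalDist(mean=35, sd=8, bounds=(18, 65), precision=0)._to_series(5)\n            scores = UniformDist(0, 100, precision=1)._to_series(5)\n            tiers  = Group([('basic', 0.5), ('pro', 0.5)])._to_series(5)\n\n        Required 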
params:\n base: Name of column which this function is applied to\n func: Function to be applied\n \"\"\"\n\n if not hasattr(func, '__call__'):\n raise TypeError('`func` must be a function')\n \n self.base = base\n self.func = func\n\n def _to_series(self, base_series):\n \"\"\"\n Return a list of length `n` in accordance with this field type\n \"\"\"\n\n return base_series.apply(self.func)\n\n\nclass Constant():\n \"\"\"\n A field type to be used in the \"fields\" configuration\n\n This field creates a series of a constant value\n \"\"\"\n\n def __init__(self, value):\n \"\"\"\n Create instance of Constant field type\n\n Required params:\n value: Constant value to be returned\n \"\"\"\n\n self.value = value\n\n def _to_series(self, n):\n \"\"\"\n Return a list of length `n` in accordance with this field type\n \"\"\"\n\n return [self.value for _ in range(n)]\n\n\n#--------------------#\n# #\n# DateTime Types #\n# #\n#--------------------#\n\nclass Date():\n \"\"\"\n A field type to be used in the \"fields\" configuration\n\n This field creates a series of a dates \n \"\"\"\n \n def __init__(self, start, end):\n \"\"\"\n Create instance of Date field type\n\n Required params:\n start: Start of date range\n end: End of date range\n \"\"\"\n\n # clean args\n if not isinstance(start, (date, datetime)):\n try:\n start = self._parse_date(start)\n except:\n raise ValueError(\"Unable to parse start date, see `help(Date)` for info\")\n\n if not isinstance(end, (date, datetime)):\n try:\n end = self._parse_date(end)\n except:\n raise ValueError(\"Unable to parse end date, see `help(Date)` for info\")\n\n if start >= end:\n raise ValueError(\"`start` time must be before `end` time\")\n\n self.start = start\n self.end = end\n\n\n def _parse_date(self, date_string):\n \"\"\"\n Parse date string into datetime.date\n \"\"\"\n\n parts = re_split(r'(-|/|\\.|\\s)', date_string)\n parts = [p for p in parts if p not in ('-', '/', ':', ' ', '.')]\n\n if len(parts) == 3:\n return date(*list(map(int, parts)))\n \n else:\n raise ValueError(f\"Unable to parse date: {date_string}\")\n\n \n def _to_series(self, n):\n \"\"\"\n Return a list of length `n` in accordance with this field type\n \"\"\"\n\n diff = self.end - self.start\n return [self.start + timedelta(days=randint(0, diff.days)) for _ in range(n)]\n\n\nclass DateTime():\n \"\"\"\n A field type to be used in the \"fields\" configuration\n\n This field creates a series of a datetimes\n \"\"\"\n\n def __init__(self, start, end, unix=False):\n \"\"\"\n Create instance of DateTime field type\n\n Required params:\n start: Start of datetime range\n end: End of datetime range\n\n Optional params:\n unix: Boolean of whether or not to return as UNIX timestamp.\n Defaults to False\n \"\"\"\n \n if not isinstance(start, datetime):\n try:\n start = self._parse_date(start)\n except:\n raise TypeError(\"Unable to parse start date, see `help(DateTime)` for info\")\n\n if not isinstance(end, datetime):\n try:\n end = self._parse_date(end)\n except:\n raise TypeError(\"Unable to parse end date, see `help(DateTime)` for info\")\n\n if start >= end:\n raise ValueError(\"`start` time must be before `end` time\")\n\n self.start = start\n self.end = end\n self.unix = unix\n\n\n def _parse_date(self, date_string):\n \"\"\"\n Parse datetime string into datetime.datetime\n \"\"\"\n\n parts = re_split(r'(-|/|:|\\.|\\s)', date_string)\n parts = [p for p in parts if p not in ('-', '/', ':', ' ', '.')]\n\n # [year, mon, day, hour, min, sec]\n # [year, mon, day, hour, min]\n if 
len(parts) >= 5:\n return datetime(*list(map(int, parts)))\n \n else:\n raise TypeError(f\"Unable to parse date: {date_string}\")\n\n\n def _to_series(self, n):\n \"\"\"\n Return a list of length `n` in accordance with this field type\n \"\"\"\n\n diff = self.end - self.start\n if self.unix:\n return [int((self.start + timedelta(seconds=randint(0, diff.total_seconds()))).timestamp()) for _ in range(n)]\n else:\n return [self.start + timedelta(seconds=randint(0, diff.total_seconds())) for _ in range(n)]\n\n\nclass Time():\n \"\"\"\n A field type to be used in the \"fields\" configuration\n\n This field creates a series of a times\n \"\"\"\n\n def __init__(self, start, end):\n \"\"\"\n Create instance of Time field type\n\n Required params:\n start: Start of time range\n end: End of time range\n \"\"\"\n \n if not isinstance(start, time):\n try:\n start = self._parse_time(start)\n except:\n raise TypeError(\"Unable to parse start time, see `help(Time)` for info\")\n\n if not isinstance(end, time):\n try:\n end = self._parse_time(end)\n except:\n raise TypeError(\"Unable to parse end time, see `help(Time)` for info\")\n\n if start >= end:\n raise ValueError(\"`start` time must be before `end` time\")\n\n self.start = start\n self.end = end \n\n\n def _parse_time(self, time_string):\n \"\"\"\n Parse time string into datetime.time\n \"\"\"\n\n parts = re_split(r'(-|/|:|\\.|\\s)', time_string)\n parts = [p for p in parts if p not in ('-', '/', ':', ' ', '.')]\n\n if len(parts) >= 2:\n return time(*list(map(int, parts)))\n \n else:\n raise TypeError(f\"Unable to parse time: {time_string}\")\n\n\n def _to_series(self, n):\n \"\"\"\n Return a list of length `n` in accordance with this field type\n \"\"\"\n\n form = '%H:%M:%S'\n d1 = datetime.strptime(self.start.strftime(form), form)\n d2 = datetime.strptime(self.end.strftime(form), form)\n diff = d2 - d1\n return [\n (d1 + timedelta(seconds=randint(0, diff.total_seconds()))).strftime(form)\n for _ in range(n)\n ]\n\n\n#--------------#\n# #\n# ID Types #\n# #\n#--------------#\n\nclass ID():\n \"\"\"\n A field type to be used in the \"fields\" configuration\n\n This field creates a series of a IDs\n \"\"\"\n\n def __init__(self, start=1):\n \"\"\"\n Create instance of ID field type\n\n Optional params:\n start: Starting value for the ID, incremented by 1 after that point.\n Defaults to 1\n \"\"\"\n\n try:\n self.start = int(start)\n except:\n raise TypeError('`start` needs to be of type `int`')\n \n def _to_series(self, n):\n \"\"\"\n Return a list of length `n` in accordance with this field type\n \"\"\"\n\n return [x for x in range(self.start, n+self.start)]\n\n\nclass GUID():\n \"\"\"\n A field type to be used in the \"fields\" configuration\n\n This field creates a series of a GUIDs\n \"\"\"\n\n def __init__(self, format=\"str\"):\n \"\"\"\n Create instance of GUID field type\n\n Optional params:\n format: Format type for the GUID. 
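Can be \"str\", \"int\", or \"hex\". Defaults to \"str\"\n\n            [Editor's sketch] The date/time field types above in action (the ranges\n            are arbitrary examples)::\n\n                dates  = Date('2020-01-01', '2020-12-31')._to_series(3)\n                stamps = DateTime('2020-01-01 00:00:00', '2020-12-31 23:59:59', unix=True)._to_series(3)\n                times  = Time('09:00', '17:00')._to_series(3)\n\n        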
Can be \"str\", \"int\", or \"hex\"\n Defaults to str\n \"\"\"\n\n self.format = format\n\n def _to_series(self, n):\n \"\"\"\n Return a list of length `n` in accordance with this field type\n \"\"\"\n\n if self.format == \"hex\":\n return [uuid4().hex for _ in range(n)]\n elif self.format == \"int\":\n return [uuid4().int for _ in range(n)]\n else:\n return [uuid4() for _ in range(n)]","repo_name":"dbusteed/databuilder","sub_path":"databuilder/fields/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":15041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74140132745","text":"# robo-chatbot\nimport io\nfrom operator import index\nfrom posixpath import split\nimport random\nfrom secrets import choice\nimport string\nimport token\nimport warnings\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nnltk.download(\"popular\", quiet=True)\n\n#read the text file\nwith open(\"ganesha.txt\",\"r\", encoding=\"utf8\", errors=\"ignore\") as words:\n raw_data = words.read().lower()\n\n#Tokenization\n#converts raw data to sentences\ntoken_sentence = nltk.sent_tokenize(raw_data)\n#converts raw data to words\ntoken_word = nltk.sent_tokenize(raw_data)\n\n#Preprocessing the data inputs\nlem_processor = WordNetLemmatizer()\n#\ndef lemmerToken(tokens):\n return [lem_processor.lemmatize(token) for token in tokens]\n#\ndict_punc = dict((ord(punct), None) for punct in string.punctuation)\n#\ndef lemmerNormalize(text):\n return lemmerToken(nltk.word_tokenize(text.lower().translate(dict_punc)))\n\n# keyword match\ngreetings_inputs = (\"namaste\",\"namaskar\",\"hi\", \"hello\", \"howdy\",\"hey\", \"g'day\")\ngreetings_responses = [\"namaskar\",\"namaste\",\"hi\", \"hello\", \"hey\", \"hi there\"]\n\n# greeting generation\ndef ganesha_greeting(sentence):\n for word in sentence.split():\n if word.lower() in greetings_inputs:\n return random.choice(greetings_responses)\n\n# generate response to user\ndef ganesha_response(userResponse):\n ganeshaResponse = \" \"\n token_sentence.append(userResponse)\n Tfid_fVectorizer = TfidfVectorizer(tokenizer=lemmerNormalize, stop_words=\"english\")\n Tfid = Tfid_fVectorizer.fit_transform(token_sentence)\n cos_val = cosine_similarity(Tfid[-1],Tfid)\n index_val = cos_val.argsort()[0][-2]\n flat = cos_val.flatten()\n flat.sort()\n req_Tfid = flat[-2]\n if (req_Tfid==0):\n ganeshaResponse=ganeshaResponse+\"Could you please rephrase what you mean?\"\n return ganeshaResponse\n else:\n ganeshaResponse = ganeshaResponse+ token_sentence[index_val]\n return ganeshaResponse\n\n\nkeepGoing = True\nprint(\"Ganesh: Namaste, this is Ganesh. Please ask me your questions. If not, please type Bye!\")\n#\nwhile(keepGoing == True):\n userResponse = input()\n userResponse = userResponse.lower()\n if (userResponse != \"bye\"):\n if (userResponse == \"thanks\" or userResponse == \"thank you\"):\n keepGoing = False\n print(\"Ganesh: You are must welcome!\")\n else:\n if (ganesha_greeting(userResponse) != None):\n print(\"Ganesh: \"+ganesha_greeting(userResponse))\n else:\n print(\"Ganesh: \",end=\"\")\n print(ganesha_response(userResponse))\n token_sentence.remove(userResponse)\n\n else:\n keepGoing = False\n print(\"Ganesh: See you, please take care of you. 
Have a good one!\")\n","repo_name":"YogeshGurung/chat_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36629151226","text":"import os\nimport numpy as np\nimport torch\nimport torch.utils.data\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\nfrom .metrics import masked_mape_np\nfrom time import time\nfrom scipy.sparse.linalg import eigs\n\n\ndef re_normalization(x, mean, std):\n x = x * std + mean\n return x\n\n\ndef max_min_normalization(x, _max, _min):\n x = 1. * (x - _min) / (_max - _min)\n x = x * 2. - 1.\n return x\n\n\ndef re_max_min_normalization(x, _max, _min):\n x = (x + 1.) / 2.\n x = 1. * x * (_max - _min) + _min\n return x\n\n\ndef get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=None):\n '''\n Parameters\n ----------\n distance_df_filename: str, path of the csv file contains edges information\n\n num_of_vertices: int, the number of vertices\n\n Returns\n ----------\n A: np.ndarray, adjacency matrix\n\n '''\n if 'npy' in distance_df_filename:\n\n adj_mx = np.load(distance_df_filename)\n\n return adj_mx, None\n\n else:\n\n import csv\n\n A = np.zeros((int(num_of_vertices), int(num_of_vertices)),\n dtype=np.float32)\n\n distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)),\n dtype=np.float32)\n\n # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序\n if id_filename:\n\n with open(id_filename, 'r') as f:\n id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\\n'))} # 把节点id(idx)映射成从0开始的索引\n\n with open(distance_df_filename, 'r') as f:\n f.readline() # 略过表头那一行\n reader = csv.reader(f)\n for row in reader:\n if len(row) != 3:\n continue\n i, j, distance = int(row[0]), int(row[1]), float(row[2])\n A[id_dict[i], id_dict[j]] = 1\n distaneA[id_dict[i], id_dict[j]] = distance\n return A, distaneA\n\n else: # distance file中的id直接从0开始\n\n with open(distance_df_filename, 'r') as f:\n f.readline()\n reader = csv.reader(f)\n for row in reader:\n if len(row) != 3:\n continue\n i, j, distance = int(row[0]), int(row[1]), float(row[2])\n A[i, j] = 1\n distaneA[i, j] = distance\n return A, distaneA\n\n\ndef get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=None):\n '''\n Parameters\n ----------\n distance_df_filename: str, path of the csv file contains edges information\n\n num_of_vertices: int, the number of vertices\n\n Returns\n ----------\n A: np.ndarray, adjacency matrix\n\n '''\n if 'npy' in distance_df_filename:\n\n adj_mx = np.load(distance_df_filename)\n\n return adj_mx, None\n\n else:\n\n import csv\n\n A = np.zeros((int(num_of_vertices), int(num_of_vertices)),\n dtype=np.float32)\n\n distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)),\n dtype=np.float32)\n\n # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序\n if id_filename:\n\n with open(id_filename, 'r') as f:\n id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\\n'))} # 把节点id(idx)映射成从0开始的索引\n\n with open(distance_df_filename, 'r') as f:\n f.readline() # 略过表头那一行\n reader = csv.reader(f)\n for row in reader:\n if len(row) != 3:\n continue\n i, j, distance = int(row[0]), int(row[1]), float(row[2])\n A[id_dict[i], id_dict[j]] = 1\n A[id_dict[j], id_dict[i]] = 1\n distaneA[id_dict[i], id_dict[j]] = distance\n distaneA[id_dict[j], id_dict[i]] = distance\n return A, distaneA\n\n else: # distance file中的id直接从0开始\n\n 
with open(distance_df_filename, 'r') as f:\n                f.readline()\n                reader = csv.reader(f)\n                for row in reader:\n                    if len(row) != 3:\n                        continue\n                    i, j, distance = int(row[0]), int(row[1]), float(row[2])\n                    A[i, j] = 1\n                    A[j, i] = 1\n                    distaneA[i, j] = distance\n                    distaneA[j, i] = distance\n            return A, distaneA\n\n\ndef get_Laplacian(A):\n    '''\n    compute the graph Laplacian, which can be represented as L = D − A\n\n    Parameters\n    ----------\n    A: np.ndarray, shape is (N, N), N is the num of vertices\n\n    Returns\n    ----------\n    Laplacian matrix: np.ndarray, shape (N, N)\n\n    '''\n\n    assert (A - A.transpose()).sum() == 0 # 首先确保A是一个对称矩阵\n\n    D = np.diag(np.sum(A, axis=1)) # D是度矩阵,只有对角线上有元素\n\n    L = D - A # L是实对称矩阵A,有n个不同特征值对应的特征向量是正交的。\n\n    return L\n\n\ndef scaled_Laplacian(W):\n    '''\n    compute \tilde{L}\n\n    Parameters\n    ----------\n    W: np.ndarray, shape is (N, N), N is the num of vertices\n\n    Returns\n    ----------\n    scaled_Laplacian: np.ndarray, shape (N, N)\n\n    '''\n\n    assert W.shape[0] == W.shape[1]\n\n    D = np.diag(np.sum(W, axis=1)) # D是度矩阵,只有对角线上有元素\n\n    L = D - W # L是实对称矩阵A,有n个不同特征值对应的特征向量是正交的。\n\n    lambda_max = eigs(L, k=1, which='LR')[0].real # 求解拉普拉斯矩阵的最大奇异值\n\n    return (2 * L) / lambda_max - np.identity(W.shape[0])\n\n\ndef sym_norm_Adj(W):\n    '''\n    compute Symmetric normalized Adj matrix\n\n    Parameters\n    ----------\n    W: np.ndarray, shape is (N, N), N is the num of vertices\n\n    Returns\n    ----------\n    Symmetric normalized Adj matrix: (D^hat)^{-1/2} A^hat (D^hat)^{-1/2}; np.ndarray, shape (N, N)\n    '''\n    assert W.shape[0] == W.shape[1]\n\n    N = W.shape[0]\n    W = W + np.identity(N) # 为邻居矩阵加上自连接\n    D_inv_sqrt = np.diag(1.0 / np.sqrt(np.sum(W, axis=1)))\n    sym_norm_Adj_matrix = np.dot(D_inv_sqrt, W)\n    sym_norm_Adj_matrix = np.dot(sym_norm_Adj_matrix, D_inv_sqrt)\n\n    return sym_norm_Adj_matrix\n\n\ndef norm_Adj(W):\n    '''\n    compute normalized Adj matrix\n\n    Parameters\n    ----------\n    W: np.ndarray, shape is (N, N), N is the num of vertices\n\n    Returns\n    ----------\n    normalized Adj matrix: (D^hat)^{-1} A^hat; np.ndarray, shape (N, N)\n    '''\n    assert W.shape[0] == W.shape[1]\n\n    N = W.shape[0]\n    W = W + np.identity(N) # 为邻接矩阵加上自连接\n    D = np.diag(1.0 / np.sum(W, axis=1))\n    norm_Adj_matrix = np.dot(D, W)\n\n    return norm_Adj_matrix\n\n\ndef multichannel_norm_adj(A):\n    norm_adj_list = []\n    for c in range(A.shape[0]):\n        norm_adj_list.append(norm_Adj(A[c]))\n\n    output = np.stack(norm_adj_list)\n    return output\n\n\ndef trans_norm_Adj(W):\n    '''\n    compute normalized Adj matrix\n\n    Parameters\n    ----------\n    W: np.ndarray, shape is (N, N), N is the num of vertices\n\n    Returns\n    ----------\n    row-normalized transposed Adj matrix: (D^hat)^{-1} (A^hat)^T; np.ndarray, shape (N, N)\n    '''\n    assert W.shape[0] == W.shape[1]\n\n    W = W.transpose()\n    N = W.shape[0]\n    W = W + np.identity(N) # 为邻居矩阵加上自连接\n    D = np.diag(1.0 / np.sum(W, axis=1))\n    trans_norm_Adj = np.dot(D, W)\n\n    return trans_norm_Adj\n\n\ndef compute_val_loss(net, val_loader, criterion, sw, epoch):\n    '''\n    compute mean loss on validation set\n    :param net: model\n    :param val_loader: torch.utils.data.DataLoader\n    :param criterion: torch.nn.MSELoss\n    :param sw: tensorboardX.SummaryWriter\n    :param epoch: int, current epoch\n    :return: val_loss\n    '''\n\n    net.train(False) # ensure dropout layers are in evaluation mode\n\n    with torch.no_grad():\n\n        val_loader_length = len(val_loader) # nb of batch\n\n        tmp = [] # 记录了所有batch的loss\n\n        start_time = time()\n\n        for batch_index, batch_data in enumerate(val_loader):\n\n            encoder_inputs, decoder_inputs, labels = batch_data\n\n            encoder_inputs = encoder_inputs.transpose(-1, 
-2) # (B, N, T, F)\n\n decoder_inputs = decoder_inputs.unsqueeze(-1) # (B, N, T, 1)\n\n labels = labels.unsqueeze(-1) # (B,N,T,1)\n\n predict_length = labels.shape[2] # T\n # encode\n encoder_output = net.encode(encoder_inputs)\n # print('encoder_output:', encoder_output.shape)\n # decode\n decoder_start_inputs = decoder_inputs[:, :, :1, :] # 只取输入的第一个值作为input,之后都用predict出来的值作为input\n decoder_input_list = [decoder_start_inputs]\n # 按着时间步进行预测\n for step in range(predict_length):\n decoder_inputs = torch.cat(decoder_input_list, dim=2)\n predict_output = net.decode(decoder_inputs, encoder_output)\n decoder_input_list = [decoder_start_inputs, predict_output]\n\n loss = criterion(predict_output, labels) # 计算误差\n tmp.append(loss.item())\n if batch_index % 100 == 0:\n print('validation batch %s / %s, loss: %.2f' % (batch_index + 1, val_loader_length, loss.item()))\n\n print('validation cost time: %.4fs' % (time() - start_time))\n\n validation_loss = sum(tmp) / len(tmp)\n sw.add_scalar('validation_loss', validation_loss, epoch)\n\n return validation_loss\n\n\ndef predict_and_save_results(net, data_loader, data_target_tensor, epoch, _max, _min, params_path, type):\n '''\n for transformerGCN\n :param net: nn.Module\n :param data_loader: torch.utils.data.utils.DataLoader\n :param data_target_tensor: tensor\n :param epoch: int\n :param _max: (1, 1, 3, 1)\n :param _min: (1, 1, 3, 1)\n :param params_path: the path for saving the results\n :return:\n '''\n net.train(False) # ensure dropout layers are in test mode\n\n start_time = time()\n\n with torch.no_grad():\n\n data_target_tensor = data_target_tensor.cpu().numpy()\n\n loader_length = len(data_loader) # nb of batch\n\n prediction = []\n\n input = [] # 存储所有batch的input\n\n start_time = time()\n\n for batch_index, batch_data in enumerate(data_loader):\n\n encoder_inputs, decoder_inputs, labels = batch_data\n\n encoder_inputs = encoder_inputs.transpose(-1, -2) # (B, N, T, F)\n\n decoder_inputs = decoder_inputs.unsqueeze(-1) # (B, N, T, 1)\n\n labels = labels.unsqueeze(-1) # (B, N, T, 1)\n\n predict_length = labels.shape[2] # T\n\n # encode\n encoder_output = net.encode(encoder_inputs)\n input.append(encoder_inputs[:, :, :, 0:1].cpu().numpy()) # (batch, T', 1)\n\n # decode\n decoder_start_inputs = decoder_inputs[:, :, :1, :] # 只取输入的第一个值作为input,之后都用predict出来的值作为input\n decoder_input_list = [decoder_start_inputs]\n\n # 按着时间步进行预测\n for step in range(predict_length):\n decoder_inputs = torch.cat(decoder_input_list, dim=2)\n predict_output = net.decode(decoder_inputs, encoder_output)\n decoder_input_list = [decoder_start_inputs, predict_output]\n\n prediction.append(predict_output.detach().cpu().numpy())\n if batch_index % 100 == 0:\n print('predicting testing set batch %s / %s, time: %.2fs' % (batch_index + 1, loader_length, time() - start_time))\n\n print('test time on whole data:%.2fs' % (time() - start_time))\n input = np.concatenate(input, 0)\n input = re_max_min_normalization(input, _max[0, 0, 0, 0], _min[0, 0, 0, 0])\n\n prediction = np.concatenate(prediction, 0) # (batch, N, T', 1)\n prediction = re_max_min_normalization(prediction, _max[0, 0, 0, 0], _min[0, 0, 0, 0])\n data_target_tensor = re_max_min_normalization(data_target_tensor, _max[0, 0, 0, 0], _min[0, 0, 0, 0])\n\n print('input:', input.shape)\n print('prediction:', prediction.shape)\n print('data_target_tensor:', data_target_tensor.shape)\n output_filename = os.path.join(params_path, 'output_epoch_%s_%s' % (epoch, type))\n np.savez(output_filename, input=input, prediction=prediction, 
data_target_tensor=data_target_tensor)\n\n # 计算误差\n excel_list = []\n prediction_length = prediction.shape[2]\n\n for i in range(prediction_length):\n assert data_target_tensor.shape[0] == prediction.shape[0]\n print('current epoch: %s, predict %s points' % (epoch, i))\n mae = mean_absolute_error(data_target_tensor[:, :, i], prediction[:, :, i, 0])\n rmse = mean_squared_error(data_target_tensor[:, :, i], prediction[:, :, i, 0]) ** 0.5\n mape = masked_mape_np(data_target_tensor[:, :, i], prediction[:, :, i, 0], 0)\n print('MAE: %.2f' % (mae))\n print('RMSE: %.2f' % (rmse))\n print('MAPE: %.2f' % (mape))\n excel_list.extend([mae, rmse, mape])\n\n # print overall results\n mae = mean_absolute_error(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1))\n rmse = mean_squared_error(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1)) ** 0.5\n mape = masked_mape_np(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1), 0)\n print('all MAE: %.2f' % (mae))\n print('all RMSE: %.2f' % (rmse))\n print('all MAPE: %.2f' % (mape))\n excel_list.extend([mae, rmse, mape])\n print(excel_list)\n\n\ndef load_graphdata_normY_channel1(graph_signal_matrix_filename, num_of_hours, num_of_days, num_of_weeks, DEVICE, batch_size, shuffle=True, percent=1.0):\n \"\"\"\n 将x,y都处理成归一化到[-1,1]之前的数据;\n 每个样本同时包含所有监测点的数据,所以本函数构造的数据输入时空序列预测模型;\n 该函数会把hour, day, week的时间串起来;\n 注: 从文件读入的数据,x,y都是归一化后的值\n :param graph_signal_matrix_filename: str\n :param num_of_hours: int\n :param num_of_days: int\n :param num_of_weeks: int\n :param DEVICE:\n :param batch_size: int\n :return:\n three DataLoaders, each dataloader contains:\n test_x_tensor: (B, N_nodes, in_feature, T_input)\n test_decoder_input_tensor: (B, N_nodes, T_output)\n test_target_tensor: (B, N_nodes, T_output)\n\n \"\"\"\n\n file = os.path.basename(graph_signal_matrix_filename).split('.')[0]\n\n dirpath = os.path.dirname(graph_signal_matrix_filename)\n\n filename = os.path.join(dirpath,\n file + '_r' + str(num_of_hours) + '_d' + str(num_of_days) + '_w' + str(num_of_weeks) + '.npz')\n\n print('load file:', filename)\n\n file_data = np.load(filename)\n train_x = file_data['train_x'] # (10181, 307, 3, 12)\n train_x = train_x[:, :, 0:1, :]\n train_target = file_data['train_target'] # (10181, 307, 12)\n train_timestamp = file_data['train_timestamp'] # (10181, 1)\n\n train_x_length = train_x.shape[0]\n scale = int(train_x_length * percent)\n print('ori length:', train_x_length, ', percent:', percent, ', scale:', scale)\n train_x = train_x[:scale]\n train_target = train_target[:scale]\n train_timestamp = train_timestamp[:scale]\n\n val_x = file_data['val_x']\n val_x = val_x[:, :, 0:1, :]\n val_target = file_data['val_target']\n val_timestamp = file_data['val_timestamp']\n\n test_x = file_data['test_x']\n test_x = test_x[:, :, 0:1, :]\n test_target = file_data['test_target']\n test_timestamp = file_data['test_timestamp']\n\n _max = file_data['mean'] # (1, 1, 3, 1)\n _min = file_data['std'] # (1, 1, 3, 1)\n\n # 统一对y进行归一化,变成[-1,1]之间的值\n train_target_norm = max_min_normalization(train_target, _max[:, :, 0, :], _min[:, :, 0, :])\n test_target_norm = max_min_normalization(test_target, _max[:, :, 0, :], _min[:, :, 0, :])\n val_target_norm = max_min_normalization(val_target, _max[:, :, 0, :], _min[:, :, 0, :])\n\n if \"HZME\" in filename:\n # filter 0-6点的数据\n train_retain = train_timestamp % (24 * 12) > 6 * 12\n train_retain_index = np.where(train_retain == True)\n train_x = train_x[train_retain_index[0], :, :]\n train_target_norm = 
train_target_norm[train_retain_index[0], :, :]\n train_timestamp = train_timestamp[train_retain_index[0], :]\n\n val_retain = val_timestamp % (24 * 12) > 6 * 12\n val_retain_index = np.where(val_retain == True)\n val_x = val_x[val_retain_index[0], :, :]\n val_target_norm = val_target_norm[val_retain_index[0], :, :]\n val_timestamp = val_timestamp[val_retain_index[0], :]\n\n test_retain = test_timestamp % (24 * 12) > 6 * 12\n test_retain_index = np.where(test_retain == True)\n test_x = test_x[test_retain_index[0], :, :]\n test_target_norm = test_target_norm[test_retain_index[0], :, :]\n test_timestamp = test_timestamp[test_retain_index[0], :]\n\n # ------- train_loader -------\n train_decoder_input_start = train_x[:, :, 0:1, -1:] # (B, N, 1(F), 1(T)),最后已知traffic flow作为decoder 的初始输入\n train_decoder_input_start = np.squeeze(train_decoder_input_start, 2) # (B,N,T(1))\n train_decoder_input = np.concatenate((train_decoder_input_start, train_target_norm[:, :, :-1]), axis=2) # (B, N, T)\n\n train_x_tensor = torch.from_numpy(train_x).type(torch.FloatTensor).to(DEVICE) # (B, N, F, T)\n train_decoder_input_tensor = torch.from_numpy(train_decoder_input).type(torch.FloatTensor).to(DEVICE) # (B, N, T)\n train_target_tensor = torch.from_numpy(train_target_norm).type(torch.FloatTensor).to(DEVICE) # (B, N, T)\n\n train_dataset = torch.utils.data.TensorDataset(train_x_tensor, train_decoder_input_tensor, train_target_tensor)\n\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle)\n\n # ------- val_loader -------\n val_decoder_input_start = val_x[:, :, 0:1, -1:] # (B, N, 1(F), 1(T)),最后已知traffic flow作为decoder 的初始输入\n val_decoder_input_start = np.squeeze(val_decoder_input_start, 2) # (B,N,T(1))\n val_decoder_input = np.concatenate((val_decoder_input_start, val_target_norm[:, :, :-1]), axis=2) # (B, N, T)\n\n val_x_tensor = torch.from_numpy(val_x).type(torch.FloatTensor).to(DEVICE) # (B, N, F, T)\n val_decoder_input_tensor = torch.from_numpy(val_decoder_input).type(torch.FloatTensor).to(DEVICE) # (B, N, T)\n val_target_tensor = torch.from_numpy(val_target_norm).type(torch.FloatTensor).to(DEVICE) # (B, N, T)\n\n val_dataset = torch.utils.data.TensorDataset(val_x_tensor, val_decoder_input_tensor, val_target_tensor)\n\n val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size * 32)\n\n # ------- test_loader -------\n test_decoder_input_start = test_x[:, :, 0:1, -1:] # (B, N, 1(F), 1(T)),最后已知traffic flow作为decoder 的初始输入\n test_decoder_input_start = np.squeeze(test_decoder_input_start, 2) # (B,N,T(1))\n test_decoder_input = np.concatenate((test_decoder_input_start, test_target_norm[:, :, :-1]), axis=2) # (B, N, T)\n\n test_x_tensor = torch.from_numpy(test_x).type(torch.FloatTensor).to(DEVICE) # (B, N, F, T)\n test_decoder_input_tensor = torch.from_numpy(test_decoder_input).type(torch.FloatTensor).to(DEVICE) # (B, N, T)\n test_target_tensor = torch.from_numpy(test_target_norm).type(torch.FloatTensor).to(DEVICE) # (B, N, T)\n\n test_dataset = torch.utils.data.TensorDataset(test_x_tensor, test_decoder_input_tensor, test_target_tensor)\n\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size * 32)\n\n # print\n print('train:', train_x_tensor.size(), train_decoder_input_tensor.size(), train_target_tensor.size())\n print('val:', val_x_tensor.size(), val_decoder_input_tensor.size(), val_target_tensor.size())\n print('test:', test_x_tensor.size(), test_decoder_input_tensor.size(), test_target_tensor.size())\n\n return train_loader, 
train_target_tensor, val_loader, val_target_tensor, test_loader, test_target_tensor, _max, _min\n","repo_name":"DrownFish19/CorrSTN","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":20562,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"42883081119","text":"\"\"\"\nbeecrowd | 3250\nProblema no Elevador\n\nVocê está a caminho de sua primeira entrevista de emprego como testador de programa e já está atrasado. A entrevista é em um arranha-céu e você está no andar s, onde vê um elevador. Ao entrar no elvator, você aprende que ele possui apenas dois botões, marcados “UP u” e “DOWN d”. Você conclui que o botão UP leva o elevador u andares para cima (se não houver andares suficientes, pressionar o botão UP não faz nada, ou pelo menos é o que você supõe), enquanto o botão DOWN leva você d andares para baixo (ou nenhum se não houver o suficiente). Sabendo que a entrevista é no andar g e que há apenas f andares no prédio, você rapidamente decide escrever um programa que fornece a quantidade de apertos de botão que você precisa para executar. Se você simplesmente não conseguir chegar ao andar correto, seu programa é interrompido com a mensagem “use as escadas”.\n\nDada a entrada f, s, g, u e d (andares, início, meta, cima, baixo), encontre a sequência mais curta de pressionamentos de botão que você deve pressionar para ir de s para g, em um edifício de f andares, ou a saída “use as escadas” se você não puder ir de s para g pelo elevador fornecido.\n\nEntrada\nA entrada consistirá em uma linha, ou seja, f s g u d, onde 1 ≤ s, g ≤ f ≤ 1000000 e 0 ≤ u, d ≤ 1000000. Os pisos são indexados em um, ou seja, se houver 10 andares, s e g estarão em [1, 10].\n\nSaída\nVocê deve responder com o número mínimo de empurrões que você deve fazer para ir de s para g, ou a saída \"use the stairs\" se for impossível dada a configuração do elevador.\n\n\nExemplo de Entrada\tExemplo de Saída\n10 1 10 2 1 6\n100 2 1 1 0 use the stairs\n\"\"\"\n\nclass Vertex:\n def __init__(self):\n self.cor = \"BRANCO\"\n self.pai = None\n self.dist = float(\"inf\")\n self.to = []\n\n\nclass Graph:\n def __init__(self, n):\n self.n = n\n self.adj = [Vertex() for i in range(n)]\n\n \"\"\"\n Adiciona uma aresta direcionada que sai do vértice u para o vértice v com peso w\n u: inteiro que representa o vértice de origem\n v: inteiro que representa o vértice de destino\n w: inteiro que representa o peso da aresta (opcional)\n \"\"\"\n def addEdge(self, u, v):\n self.adj[u].to.append(v)\n\n \"\"\"\n Faz a busca em largura no grafo a partir do vértice s\n s: inteiro que representa o vértice de origem\n \"\"\"\n def BFS(self, s):\n self.adj[s].cor = \"CINZA\"\n self.adj[s].dist = 0\n self.adj[s].pai = None\n fila = [s]\n while fila:\n u = fila.pop(0)\n for v in self.adj[u].to:\n if self.adj[v].cor == \"BRANCO\":\n self.adj[v].cor = \"CINZA\"\n self.adj[v].dist = self.adj[u].dist + 1\n self.adj[v].pai = u\n fila.append(v)\n self.adj[u].cor = \"PRETO\"\n \n \"\"\"\n Printar o grafo, com seus vertices, arestas e cores\n \"\"\"\n def printGraph(self):\n for i in range(self.n):\n print(\"Vertice: \", i, \" Cor: \", self.adj[i].cor, \" Pai: \", self.adj[i].pai, \" Distancia: \", self.adj[i].dist, \" Vizinhos: \", self.adj[i].to)\n\ndef main():\n f, s, g, u, d = map(int, input().split())\n grafo = Graph(f+1)\n for i in range(1, f+1):\n if i+u <= f:\n grafo.addEdge(i, i+u)\n if i-d >= 1:\n grafo.addEdge(i, i-d)\n grafo.BFS(s)\n if grafo.adj[g].dist == 
float(\"inf\"):\n print(\"use the stairs\")\n else:\n print(grafo.adj[g].dist)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"felipolis/UTFPR","sub_path":"5 - Teoria dos Grafos/Listas URI ONLINE/LISTA 03 - BFS (BUSCA EM LARGURA)/Problema no Elevador.py","file_name":"Problema no Elevador.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10646508475","text":"import numpy as np \nimport pandas as pd \nimport random as rd\nimport matplotlib.pyplot as plt \nimport pickle \n\ndf = pd.read_csv('housingdata.csv')\nrows, columns = df.shape\n\nX = np.array(df.drop(df.columns[[columns-1]],1))\ny = np.array(df[df.columns[-1]])\n\nX = X.tolist()\ny = y.tolist()\n\ntest_size = 0.2\nno_of_iteration = 500\nalpha = 1\n\ndef test_train_split(X,y,test_size=0.2):\n\tm = len(X)\n\tn = int(m*-test_size)\n\tX_train = X[:n]\n\tX_test = X[n:]\n\ty_train = y[:n]\n\ty_test = y[n:]\n\treturn X_train,X_test,y_train,y_test\ndef normaliese(X):\n\tnorm=[]\n\tfor i in range(len(X[0])):\n\t\tnew_list = [item[i] for item in X]\n\t\tu,v = (max(new_list)-min(new_list),sum(new_list)/len(X))\n\t\tnorm.append((u,v))\n\tfor i in range(len(X)):\n\t\tfor j in range(len(norm)):\n\t\t\tif j is not 0:\n\t\t\t\tX[i][j] = (X[i][j]-norm[j][1])/float(norm[j][0])\n\treturn X\n\t\t\nclass MultiLinearRegression:\n\ttheta = []\n\tdef __init__(self):\n\t\tself.theta = []\n\t\n\tdef predict(self,feature):\n\t\th=0;\n\t\tfor i in range(len(self.theta)):\n\t\t\th += self.theta[i]*feature[i]\n\t\treturn h \n\t\n\tdef getCost(self,X,y):\n\t\tcost = 0;\n\t\tfor i in range(len(X)):\n\t\t\tcost += (self.predict(X[i])-y[i])**2\n\t\treturn cost\n\tdef fit(self,X,y,no_of_iteration=300,alpha=0.1):\n\t\tsize = len(X)\n\t\tno_of_features = len(X[0])\n\t\tself.theta=[rd.random() for i in range(no_of_features)]\n\t\tfor i in range(no_of_iteration):\n\t\t\tnew_theta = []\n\t\t\tfor j in range(no_of_features):\n\t\t\t\tsum=0;\n\t\t\t\tfor k in range(size):\n\t\t\t\t\tsum += (self.predict(X[k])-y[k])*X[k][j]\n\t\t\t\tnew_theta.append(self.theta[j]-(float(alpha)/size)*sum)\n\t\t\tself.theta = new_theta\n\t\t\tcost = self.getCost(X,y)\n\nX = [[1]+X[i] for i in range(len(X))]\nX = normaliese(X)\nX_train,X_test,y_train,y_test = test_train_split(X,y,test_size)\n#reg = MultiLinearRegression()\n#reg.fit(X_train,y_train,no_of_iteration,alpha)\n#with open('multiregression.pickle','wb') as f:\n#\tpickle.dump(reg,f)\n\npickle_in = open('multiregression.pickle','rb')\nreg = pickle.load(pickle_in)\ncoff = reg.theta\n","repo_name":"rohitkes/ml","sub_path":"LinearRegression/House_Price_Prediction/predict_house_price.py","file_name":"predict_house_price.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21465828184","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import StringType, StructField, IntegerType, StructType, NumericType\nfrom pyspark.sql.functions import col, lit, concat\n\n\nspark = SparkSession.builder.appName(\"Learning Pyspark Airline\").config(\"deployment.mode\", \"client\").master(\"yarn\").getOrCreate()\n\n# Creating dataframe\n# airlines = spark.read.csv(\n# \"hdfs://namenode:9000/user/spark/datasets/airtrafficdata/airlines_all/airlines\",\n# header=True,\n# inferSchema=True\n# )\n\n# schema = airlines.schema\n\n# airlines.printSchema()\n# print(\"Total Records =\", airlines.count())\n# print(\"Total Distinct Rows =\", 
airlines.distinct().count())\n# airlines.show(10)\n\nemployees = [\n (1, \"Scott\", \"Tiger\", 1000.0, \"united states\"),\n (2, \"Henry\", \"Ford\", 1250.0, \"India\"),\n (3, \"Nick\", \"Junior\", 750.0, \"united KINGDOM\"),\n (4, \"Bill\", \"Gomes\", 1500.0, \"AUSTRALIA\")\n]\n\nemployees = spark.createDataFrame(\n employees, schema=\"\"\"\n employee_id INT,\n first_name STRING,\n last_name STRING,\n salary FLOAT,\n nationality STRING\n \"\"\"\n)\n\nemployees.printSchema()\nemployees.show()\n\n# Projection\nemployees.select(\"first_name\", \"last_name\").show()\nemployees.drop(\"nationality\")\nemployees.withColumn(\"full_name\", concat('first_name', lit(' '), 'last_name')).show()\nemployees.selectExpr(\"concat(first_name, ' ', last_name) AS full_name\").show()\nemployees.select(\"employee_id\", concat(\"first_name\", lit(' '), \"last_name\").alias(\"full_name\")).show()\n\n# employees.write.parquet(\"\", mode=\"overwrite\", compression=None)\n# employees.write.mode(\"overwrite\").option(\"compression\", 'none').format('parquet').save(\"\")\n# employees.write.mode('overwrite').option('compression', 'none').parquet(\"\")\n# employees.coalesce(1).write.mode('overwrite').option('compression', 'none').parquet(\"\")\n# employees.coalesce(1).write.mode('ignore').option('compression', 'none').parquet(\"\") # For ignoring write if path already present\n\n# when-otherwise ","repo_name":"mukimmanan/apache-server","sub_path":"projects/py/airlines.py","file_name":"airlines.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32628459489","text":"import re\n검사기 = re.compile('[a-zA-Z]')\nseqs모음집 = []\ninfo = []\nwith open('rosalind_ba2a.txt','r') as f:\n for line in f:\n if 검사기.match(line):\n seqs모음집.append(line.strip())\n else:\n info = [int(i) for i in line.strip().split()]\nbase = 'ACGT'\ndef finding(target,remaining_dist):\n remain = remaining_dist\n result = []\n \n def finding_neighbors(target,remaining_dist,index):\n remain = remaining_dist\n if remain == 0:\n result.append(target)\n return\n for i in range(index,len(target)):\n for char in base:\n if target[i] != char:\n # remaining_dist -= 1 \n new_seq = target[:i] + char + target[i+1:]\n # index = i+1\n finding_neighbors(new_seq,remaining_dist-1,i+1)\n finding_neighbors(target,remaining_dist,0)\n return result\ntarget_모음집 = [seqs모음집[0][i:i+info[0]] for i in range(len(seqs모음집[0])-info[0]+1)]\nneighbors= []\n\n\nfor i in target_모음집:\n for j in range(info[1]+1):\n a = finding(i,j)\n neighbors.extend(a)\n\n\n\n\n# 검사\ncheck여부 = {}\n\nfor neighbor in neighbors:\n neighbor_found = False # 현재 neighbor에 대한 검사 결과\n for seqs in seqs모음집:\n seq_found = False # 현재 시퀀스에 대한 검사 결과\n for k in range(len(seqs) - info[0] + 1):\n seq_particle = seqs[k:k + info[0]]\n count = 0\n for u in range(info[0]):\n if seq_particle[u] != neighbor[u]:\n count += 1\n if count <= info[1]:\n seq_found = True # 시퀀스에 대해 조건을 만족하는 경우\n break # 해당 시퀀스에서 조건을 만족하는 부분이 발견되면 중단\n if not seq_found:\n break # 시퀀스에 대해 조건을 만족하는 부분이 발견되지 않으면 중단\n if seq_found:\n check여부[neighbor] = True\n else:\n check여부[neighbor] = False\nprint(check여부)\n# 검사 결과 출력\nfor neighbor, found in check여부.items():\n if found:\n print(neighbor)","repo_name":"codenamegyoungho/rosalind_solution","sub_path":"rosalind_2aa.py","file_name":"rosalind_2aa.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"16254353997","text":"'''\nCreated on Jun 19, 2010\n\n@author: jnaous\n'''\nfrom models import DatedMessage\nfrom django.conf import settings\ndef messaging(request):\n if request.user.is_authenticated():\n qs = DatedMessage.objects.get_messages_for_user(\n request.user).order_by('-datetime')[:settings.NUM_CONTEXT_MSGS]\n l = list(qs)\n else:\n l = []\n return {\"messages\": l}\n","repo_name":"fp7-ofelia/ocf","sub_path":"vt_manager/src/python/vt_manager/common/messaging/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"75082929226","text":"\n\nimport cv2\nimport numpy as np\n\n# Load the original image\nimage = cv2.imread(\"E:\\7th Sem\\DIP\\Assignment2\\data\\1-3.jpg\")\n\n\n# Define the region of interest (ROI) coordinates and dimensions\nregion_x = 310 \nregion_y = 40 \n\n\nroi_width = 160 \nroi_height = 140 \n\n# Extract the ROI as a template\ntemplate = image[region_y:region_y + roi_height, region_x:region_x + roi_width]\n\n# Define the lower region\n\nlower_region_x = 0 \nlower_region_y = 200 \n\n\nlower_roi_width = image.shape[1] \nlower_roi_height = image.shape[0] - lower_region_y \n\n\n#Code to crop the lower region of the image\n\nlower_region = image[lower_region_y:lower_region_y + lower_roi_height, lower_region_x:lower_region_x + lower_roi_width]\n\n\n# Using CV's match funtion to check if two templates match\ncheck_match_found = cv2.matchTemplate(lower_region, template, cv2.TM_CCOEFF_NORMED)\n\n#Amount of threshold which will show if match or not\nmin_thres = 0.95\n\n\n# Matches above the threshold assigned\nBoxes_match = np.where(check_match_found >= min_thres)\n\n# Reverse the coordinates\nBoxes_match = list(zip(*Boxes_match[::-1])) \n\n##IF no threshold no match then print\nif not Boxes_match:\n print(\"No Box to be match found\")\n#IF match found\nelse:\n # Draw rectangles around the matched areas on the original image\n for location in Boxes_match:\n \n top_left = (location[0] + lower_region_x, location[1] + lower_region_y)\n template_height, template_width, _ = template.shape\n \n bottom_right = (top_left[0] + template_width, top_left[1] + template_height)\n cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), 2)\n\n \n \n #Showing the image with a green outline outside the boxes which match\n cv2.imshow('Images with match are shown as', image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Moosa-123/DIP_Assignment2","sub_path":"Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16010543154","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\n\ndef get_first_name():\n name = 'Nobody'\n if auth.user:\n name = auth.user.first_name\n return name\n\ndef get_email():\n email = ''\n if auth.user:\n email = auth.user.email\n return email\n \nCATEGORY = ['Car',\n 'Bike',\n 'Books',\n 'Music',\n 'Outdoors',\n 'For the House',\n 'Misc.']\n\ndb.define_table('bboard',\n Field('name'),\n Field('user_id', db.auth_user),\n Field('phone'),\n Field('email'),\n Field('category'),\n Field('date_posted', 'datetime'),\n Field('title'),\n Field('price'),\n Field('sold', 'boolean'),\n Field('image', 'upload'),\n Field('bbmessage', 'text'),\n )\n\n\ndb.bboard.id.readable = False\ndb.bboard.bbmessage.label = 'Message'\ndb.bboard.name.default = 
get_first_name()\ndb.bboard.date_posted.default = datetime.utcnow()\ndb.bboard.name.writable = False\ndb.bboard.date_posted.writable = False\ndb.bboard.user_id.default = auth.user_id\ndb.bboard.user_id.writable = db.bboard.user_id.readable = False\ndb.bboard.email.default = get_email()\ndb.bboard.email.writable = False\ndb.bboard.category.requires = IS_IN_SET(CATEGORY, zero = None)\ndb.bboard.category.default = 'Misc.'\ndb.bboard.category.required = True\ndb.bboard.sold.default = False\ndb.bboard.phone.requires = IS_MATCH('^1?((-)\\d{3}-?|\\(\\d{3}\\))\\d{3}-?\\d{4}$',\n error_message='not a phone number')\n#db.bboard.image.readable = False \n","repo_name":"JonoYang/cmps183-lboard","sub_path":"models/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73724406984","text":"# Let's create a Quiz\n\nfrom Question import Question\n\n# Represents the prompts of the question\nprompts = [\"Which of this characters can be used in python for naming a variable?\\n(a)# \\n (b)_ \\n (c)%\"]\n\n# Creates a list of questions\nquestions = [Question(prompts[0], 'b')]\n\n# Will store the questions that the user needs to review because\n# they got it wrong.\nquestions_to_review = []\n\nfor question in questions:\n answer = input(question.prompt + \"\\n\")\n\n if answer == question.answer:\n print(\"Your answer was correct!\")\n else:\n print(\"Your answer was not correct!\")\n\n # Add the questions to the list of questions to review\n # because the user got it wrong\n questions_to_review.append(question)\n","repo_name":"ITDevRene/Projects","sub_path":"Python_Projects/Quiz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15243156271","text":"import googlemaps\nfrom datetime import datetime\n\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\napi = tweepy.API(auth)\n\nmyStreamListener = MyStreamListener()\nmyStream = tweepy.Stream(auth=api.auth, listener=myStreamListener)\n\nmyStream.filter(track=['#HACKPSUHELPLINE'])\n\n# for temp in tweepy.Cursor(api.search, q=\"#HACKPSUHELPLINE\", ang=\"en\").items():\n# print(temp)\n\n# for tweet in tweepy.Cursor(api.search, q=\"#pokemonleak\", count=100,\n# lang=\"en\").items():\n# print (tweet.created_at, tweet.text)\n\n# gmaps = googlemaps.Client(key='AIzaSyBKRd5kn3yHGCja-ao7mQcmwHBbvdgvyTM')\n\n# # Geocoding an address\n# geocode_result = gmaps.geocode('1600 Amphitheatre Parkway, Mountain View, CA')\n\n# # Look up an address with reverse geocoding\n# reverse_geocode_result = gmaps.reverse_geocode((40.714224, -73.961452))\n\n# print(reverse_geocode_result)\n\n# print(\"-\" * 20)\n# # Request directions via public transit\n# now = datetime.now()\n# directions_result = gmaps.directions(\"Sydney Town Hall\",\n# \"Parramatta, NSW\",\n# mode=\"transit\",\n# departure_time=now)\n# print(directions_result)\n","repo_name":"hXtreme/hackpsu","sub_path":"app/test_coords.py","file_name":"test_coords.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25364412991","text":"import matplotlib.pyplot as plt\nimport pandas\n\nfile_name = 'score.xlsx'\n\n\n# 格式化输出列表\ndef print_list(lists):\n length = len(lists)\n for i in range(length):\n if i == length - 1:\n end = '\\n'\n else:\n end = '\\t\\t'\n 
print(lists[i], end=end)\n\n\n# 格式化输出行对象\ndef print_row(row):\n print(row['学号'], '\\t\\t', row['姓名'], '\\t\\t', row['英语'], '\\t\\t', row['高数'], '\\t\\t', row['C语言'], '\\t\\t', row['总分'])\n\n\n# 读取文件数据\ndef read_data(file):\n data = pandas.read_excel(file)\n\n print_list(data.columns.values)\n\n columns = data.columns.size\n rows = len(data)\n\n print_list(data.loc[0].values)\n print_list(data.loc[1].values)\n print_list(data.loc[2].values)\n\n print('[{0}行 x {1}列]'.format(3, columns))\n\n print_list(data.columns.values)\n\n print_list(data.loc[rows - 2].values)\n print_list(data.loc[rows - 1].values)\n\n print('[{0}行 x {1}列]'.format(2, columns))\n\n print('数据读取成功')\n\n\n# 数据处理(新增一列“总分”列,值为前三列成绩之和,写回excel)(注意:to_excel会删除其他的sheet,所以运行前请做好备份)\ndef handle_data(file):\n data = pandas.read_excel(file)\n\n data['总分'] = None\n\n for i in range(len(data)):\n item = data.loc[i]\n\n english = item.loc['英语']\n math = item.loc['高数']\n program = item.loc['C语言']\n\n data.loc[i, '总分'] = english + math + program\n\n data.to_excel(file, sheet_name='Sheet1', index=False, header=True)\n\n print('数据处理成功')\n\n\n# 按总分排序(从高到低)输出(注意:排序之后若通过loc[index]来取值的话,还是按照原来的顺序输出的,所以这里换一种遍历方式)\ndef sort_total_score(file):\n data = pandas.read_excel(file)\n\n data.sort_values(by='总分', ascending=False, inplace=True)\n\n for index, row in data.iterrows():\n print_row(row)\n\n print('数据排序成功')\n\n\n# 统计每门课最高分,最低分,平均分,并输出\ndef statistics_score(file):\n data = pandas.read_excel(file)\n\n length = len(data)\n\n max_english = 0\n min_english = 0\n sum_english = 0\n max_math = 0\n min_math = 0\n sum_math = 0\n max_program = 0\n min_program = 0\n sum_program = 0\n\n for i in range(length):\n item = data.loc[i]\n\n score_english = item.loc['英语']\n score_math = item.loc['高数']\n score_program = item.loc['C语言']\n\n if i == 0:\n max_english = score_english\n min_english = score_english\n max_math = score_math\n min_math = score_math\n max_program = score_program\n min_program = score_program\n else:\n if max_english < score_english:\n max_english = score_english\n if min_english > score_english:\n min_english = score_english\n\n if max_math < score_math:\n max_math = score_math\n if min_math > score_math:\n min_math = score_math\n\n if max_program < score_program:\n max_program = score_program\n if min_program > score_program:\n min_program = score_program\n\n sum_english += score_english\n sum_math += score_math\n sum_program += score_program\n\n print('英语:最高分:{0},最低分:{1},平均分:{2}'.format(max_english, min_english, round(sum_english / length, 2)))\n print('高数:最高分:{0},最低分:{1},平均分:{2}'.format(max_math, min_math, round(sum_math / length, 2)))\n print('C语言:最高分:{0},最低分:{1},平均分:{2}'.format(max_program, min_program, round(sum_program / length, 2)))\n\n\n# 查找学生成绩信息\ndef find_student(file, name):\n data = pandas.read_excel(file)\n\n for i in range(len(data)):\n item = data.loc[i]\n\n if item.loc['姓名'] == name:\n print('查到到结果:')\n print_list(data.columns.values)\n print_list(item.values)\n return\n\n print('没有该同学')\n\n\n# 画成绩分布图\ndef map_distribution(file):\n data = pandas.read_excel(file)\n\n list_english = [0 for x in range(0, 10)]\n list_math = [0 for x in range(0, 10)]\n list_program = [0 for x in range(0, 10)]\n\n score_range = [[0, 9], [10, 19], [20, 29], [30, 39], [40, 49], [50, 59], [60, 69], [70, 79], [80, 89], [90, 100]]\n\n for i in range(len(data)):\n item = data.loc[i]\n\n score_english = item.loc['英语']\n score_math = item.loc['高数']\n score_program = item.loc['C语言']\n\n for index in range(len(score_range)):\n item_range = 
score_range[index]\n\n start = item_range[0]\n end = item_range[1]\n\n if start <= score_english <= end:\n list_english[index] += 1\n\n if start <= score_math <= end:\n list_math[index] += 1\n\n if start <= score_program <= end:\n list_program[index] += 1\n\n name_list = ['0-9', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79', '80-89', '90-100']\n\n # 中文乱码问题\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n\n plt.figure()\n plt.bar(name_list, list_english)\n plt.title('英语成绩分布图')\n plt.xlabel('分数段')\n plt.ylabel('人数')\n\n for name, score in zip(name_list, list_english):\n if score > 0:\n plt.text(name, score + 0.1, '{0}人'.format(score), ha='center', va='bottom')\n\n plt.figure()\n plt.bar(name_list, list_math)\n plt.title('高数成绩分布图')\n plt.xlabel('分数段')\n plt.ylabel('人数')\n\n for name, score in zip(name_list, list_math):\n if score > 0:\n plt.text(name, score + 0.1, '{0}人'.format(score), ha='center', va='bottom')\n\n plt.figure()\n plt.bar(name_list, list_program)\n plt.title('C语言成绩分布图')\n plt.xlabel('分数段')\n plt.ylabel('人数')\n\n for name, score in zip(name_list, list_program):\n if score > 0:\n plt.text(name, score + 0.1, '{0}人'.format(score), ha='center', va='bottom')\n\n plt.show()\n\n\nif __name__ == '__main__':\n while True:\n print('''=============学生成绩管理系统=============\n 1、数据读取\n 2、数据处理(计算总分,数据不能包含总分列)\n 3、数据排序\n 4、统计分数\n 5、查询学生成绩\n 6、数据分布柱状图展示\n 7、退出\n ========================================\n ''')\n\n choice = input('请选择:')\n\n if choice == '1':\n read_data(file_name)\n elif choice == '2':\n handle_data(file_name)\n elif choice == '3':\n sort_total_score(file_name)\n elif choice == '4':\n statistics_score(file_name)\n elif choice == '5':\n input_name = input('请输入学生姓名:')\n find_student(file_name, input_name)\n elif choice == '6':\n map_distribution(file_name)\n elif choice == '7':\n break\n","repo_name":"llfwer/python","sub_path":"课程设计/课程设计.py","file_name":"课程设计.py","file_ext":"py","file_size_in_byte":7217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41012401145","text":"# lets install \"requests\" package inside a virtual environment.\r\n\r\nimport requests\r\n\r\nurl = \"https://api.yelp.com/v3/businesses/search\"\r\napi_key = \"eioqwr54389NotTheRealAPIKey\" # get the actual API key from Yelp developer --> our APP.\r\nheaders = {\r\n \"Authorization\": \"Bearer \" + api_key\r\n}\r\nparams = {\r\n \"term\": \"Indian Food\",\r\n \"location\": \"Taipei\"\r\n}\r\n# requests.get() is used to send an HTTP request to the endpoint to get data. 
It returns a response object\r\nresponse = requests.get(url, headers=headers, params=params)\r\n\r\n# Use the \".text\" method to convert the response to text.\r\n# It can be used in case of error so see the description of the error.\r\nprint(response.text)\r\n\r\nresult = response.json() # To convert the response into a dictionary use the \"json()\" method.\r\nbusinesses = result[\"businesses\"] # Accessing the \"businesses\" key in the dictionary.\r\nprint(businesses)\r\nfor business in businesses:\r\n print(business[\"name\"])\r\n\r\nfilter_busi = [business[\"name\"] for business in businesses if business[\"rating\"] >= 4.5]\r\n# We are using a comprehension list to filter all the business with rating >= 4.5 and storing their name in a new list.\r\nprint(filter_busi)","repo_name":"shubhranshi/mosh-python","sub_path":"11_Popular_Python_Packages/114_searching_for_businesses.py","file_name":"114_searching_for_businesses.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3649046445","text":"from src.instruction_utility import *\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nfrom PIL import Image\nimport pandas as pd\nimport plotly.express as px\nfrom sklearn.decomposition import PCA\nfrom src.data.flat_dataset_factory import FlatDatasetFactory\n\ndef predict_epochs(instruction_parser):\n path = os.path.join(get_project_dir(), 'docs', 'assets', 'VAE_visuals', 'input', '000029_4.jpg')\n image = Image.open(path)\n image = image.resize((224, 224))\n data_render = np.asarray(image) / 255.\n data_render = np.expand_dims(data_render, axis=0)\n\n weight_files_list = sorted([os.path.join(get_visuals_dir(), file) for file in os.listdir(get_visuals_dir())])\n reconstruction_dict = {}\n\n for file in tqdm(weight_files_list):\n model = instruction_parser.get_model()\n model.compile(optimizer=instruction_parser.get_optimizer())\n model.built = True\n model.load_weights(file)\n reconstruction = model.predict(data_render)\n reconstruction_dict[file] = reconstruction\n\n return reconstruction_dict\n\ndef save_image_reconstructions(reconstruction_dict):\n index = 1\n for file in reconstruction_dict.keys():\n array = (reconstruction_dict[file][0] * 255).astype(np.uint8)\n image = Image.fromarray(array)\n target_path = os.path.join(get_project_dir(), 'docs', 'assets', 'VAE_visuals', 'reconstructions')\n image.save(f'{target_path}/{index:03d}.jpeg')\n index += 1\n\ndef predict_embeddings(instruction_parser):\n database = load_dataframe(\"data/processed/category_id_1_min_pair_count_10_deepfashion_train.joblib\")\n model_factory = instruction_parser.model_factory\n factory = FlatDatasetFactory(database, preprocessor=model_factory.preprocessor())\n data = factory.get_dataset(batch_size=385, shuffle=False)\n\n label_batches = []\n for x, y in data.take(1):\n images_preprocessed = x\n label_batches.append(y)\n\n labels = np.concatenate(label_batches)\n\n\n weight_files_list = sorted([os.path.join(get_visuals_dir(), file) for file in os.listdir(get_visuals_dir())])\n embedding_dict = {}\n\n for file in tqdm(weight_files_list):\n model = instruction_parser.get_model()\n model.compile(optimizer=instruction_parser.get_optimizer())\n model.built = True\n model.load_weights(file)\n _, _, embeddings = model.encoder.predict(images_preprocessed)\n embedding_dict[file] = embeddings\n\n return embedding_dict, labels\n\ndef save_PCA_images(embedding_dict, labels):\n database = 
load_dataframe(\"data/processed/category_id_1_min_pair_count_10_deepfashion_train.joblib\")\n index = 1\n for file in embedding_dict.keys():\n pca = PCA(n_components=2)\n embeddings = embedding_dict[file]\n pca.fit(embeddings)\n components = pca.transform(embeddings)\n pca_df = pd.DataFrame(components, columns=['pc1', 'pc2'])\n pca_df['pair_id'] = labels\n pca_df['pair_id'] = pca_df['pair_id'].astype(int)\n pca_df['pair_id'] = pca_df['pair_id'].astype(str)\n\n fig = px.scatter(pca_df,\n x='pc1',\n y='pc2',\n color='pair_id',\n range_x=(-30, 30),\n range_y=(-30, 30),\n color_discrete_sequence=px.colors.qualitative.Dark24,\n width=600,\n height=500)\n\n target_path = os.path.join(get_project_dir(), 'docs', 'assets', 'VAE_visuals', 'embeddings')\n fig.write_image(f'{target_path}/{index:03d}.jpeg')\n index += 1\n\n\n\n\ndef get_project_dir():\n file_path = os.path.abspath(__file__) # /src/epoch_visualizations.py\n file_dir = os.path.dirname(file_path) # /src\n return os.path.dirname(file_dir) # \n\ndef get_visuals_dir():\n visuals_dir = os.path.join(get_project_dir(), 'models', 'visuals')\n return visuals_dir\n\nif __name__ == '__main__':\n instruction_parser = InstructionParser('VAE_conv2d_input_224_embedding_512.json')\n # reconstruction_dict = predict_epochs(instruction_parser)\n # save_image_reconstructions(reconstruction_dict)\n embedding_dict, labels = predict_embeddings(instruction_parser)\n save_PCA_images(embedding_dict, labels)","repo_name":"sgerloff/sustainable_deepfashion","sub_path":"src/epoch_visualizations.py","file_name":"epoch_visualizations.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"5955196723","text":"from django.urls import path\nfrom .views import StorePage, ProductDetailPage, wishlistProduct, wishlistProductDetail, \\\n submit_review, wishlistAjax\n\nurlpatterns = [\n path('', StorePage.as_view(), name='storepage'),\n path('product-detail/', ProductDetailPage.as_view(), name='productdetailpage'),\n path('submit_review/', submit_review, name='submit_review'),\n path('addtowishlist/', wishlistProduct, name='product_wishlist'),\n path('addtowishlistdetail/', wishlistProductDetail, name='product_detail_wishlist')\n]","repo_name":"deepeshanandparab/AthletiX-Sports-Shop-Django-REST","sub_path":"store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37713377248","text":"import streamlit as st\r\nimport numpy as np\r\nfrom sklearn.datasets import make_blobs\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import accuracy_score\r\nimport tensorflow as tf\r\nfrom tensorflow import keras \r\nimport numpy as np\r\nimport pandas as pd\r\nimport altair as alt\r\nimport plotly.express as px\r\nimport joblib\r\nimport cv2\r\nfrom PIL import Image\r\nimport io\r\nst.set_page_config(page_title=\"Plotting Demo\", page_icon=\"📈\")\r\nst.title('KNN')\r\ndef Bai01():\r\n st.header('Bai01 📣')\r\n N = 150\r\n centers = [[2, 3], [5, 5], [1, 8]]\r\n n_classes = len(centers)\r\n data, labels = make_blobs(N, centers=np.array(centers), random_state=1)\r\n nhom_0 = []\r\n nhom_1 = []\r\n nhom_2 = []\r\n for i in range(150):\r\n if labels[i] == 0:\r\n nhom_0.append([data[i, 0], data[i, 1], \"red\", 0])\r\n elif labels[i] == 1:\r\n nhom_1.append([data[i, 0], data[i, 1], 
\"green\", 1])\r\n else:\r\n nhom_2.append([data[i, 0], data[i, 1], \"blue\", 2])\r\n nhom_0 = np.array(nhom_0)\r\n nhom_1 = np.array(nhom_1)\r\n nhom_2 = np.array(nhom_2)\r\n\r\n df = pd.DataFrame((*nhom_0, *nhom_1, *nhom_2), columns=[\"x\", \"y\", \"color\", \"nhom\"])\r\n st.expander(\"Show data\").write(df)\r\n\r\n c = (\r\n alt.Chart(df)\r\n .mark_circle()\r\n .encode(\r\n x=\"x:Q\",\r\n y=\"y:Q\",\r\n color=alt.Color(\"color\", scale=None),\r\n tooltip=[\"x\", \"y\", \"color\", \"nhom\"],\r\n )\r\n )\r\n st.altair_chart(c, use_container_width=True)\r\n base = alt.Chart(df).encode(alt.X('X:O'))\r\n chart_test_count = base.mark_line().encode(alt.Y('Y:N'))\r\n chart_test_failures = base.mark_line().encode(alt.Y('Color:N'))\r\n res = train_test_split(data, labels, \r\n train_size=0.8,\r\n test_size=0.2,\r\n random_state=12)\r\n train_data, test_data, train_labels, test_labels = res \r\n knn = KNeighborsClassifier()\r\n knn.fit(train_data, train_labels) \r\n predicted = knn.predict(test_data)\r\n accuracy = accuracy_score(predicted, test_labels)\r\n st.latex('Do chinh xac: %.0f%%' % (accuracy*100))\r\ndef Bai02():\r\n st.header('Bai02 📣')\r\n mnist = keras.datasets.mnist \r\n (X_train, Y_train), (X_test, Y_test) = mnist.load_data() \r\n\r\n # 784 = 28x28\r\n RESHAPED = 784\r\n X_train = X_train.reshape(60000, RESHAPED)\r\n X_test = X_test.reshape(10000, RESHAPED) \r\n\r\n # now, let's take 10% of the training data and use that for validation\r\n (trainData, valData, trainLabels, valLabels) = train_test_split(X_train, Y_train,\r\n test_size=0.1, random_state=84)\r\n model = KNeighborsClassifier()\r\n model.fit(trainData, trainLabels)\r\n # save model, sau này ta sẽ load model để dùng\r\n def pickle_model(model):\r\n f = io.BytesIO()\r\n joblib.dump(model, f)\r\n return f \r\n # Đánh giá trên tập validation\r\n predicted = model.predict(valData)\r\n do_chinh_xac = accuracy_score(valLabels, predicted)\r\n st.write('Độ chính xác trên tập validation: %.0f%%' % (do_chinh_xac*100))\r\n # Đánh giá trên tập test\r\n predicted = model.predict(X_test)\r\n do_chinh_xac = accuracy_score(Y_test, predicted)\r\n st.write('Độ chính xác trên tập test: %.0f%%' % (do_chinh_xac*100))\r\n st.download_button(\"Download Model\", data=pickle_model(model), file_name=\"knn_mnist.pkl\")\r\ndef Bai03():\r\n st.header(\"Bai03 📣\")\r\n uploaded_file = st.file_uploader(\"OPEN MODEL\",type=['pkl'])\r\n if uploaded_file is not None:\r\n mnist = keras.datasets.mnist\r\n (X_train, Y_train), (X_test, Y_test) = mnist.load_data()\r\n index = None\r\n knn = joblib.load(uploaded_file)\r\n btn1 = st.button('Nhan dang')\r\n if btn1:\r\n col1, col2 = st.columns([3,2])\r\n index = np.random.randint(0, 9999, 100)\r\n digit = np.zeros((10*28,10*28), np.uint8)\r\n k = 0\r\n for x in range(0, 10):\r\n for y in range(0, 10):\r\n digit[x*28:(x+1)*28, y*28:(y+1)*28] = X_test[index[k]]\r\n k = k + 1 \r\n with col1:\r\n st.latex(\"IMAGE\")\r\n st.write()\r\n st.write()\r\n cv2.imwrite('digit.jpg', digit)\r\n image = Image.open('digit.jpg')\r\n st.image(image, caption='IMAGE')\r\n sample = np.zeros((100,28,28), np.uint8)\r\n for i in range(0, 100):\r\n sample[i] = X_test[index[i]]\r\n \r\n RESHAPED = 784\r\n sample = sample.reshape(100, RESHAPED) \r\n predicted = knn.predict(sample)\r\n k = 0\r\n with col2:\r\n st.latex(\"Ket qua nhan dang\")\r\n for x in range(0, 10):\r\n ketqua = ''\r\n for y in range(0, 10):\r\n ketqua = ketqua + '%3d' % (predicted[k])\r\n k = k + 1\r\n st.subheader(ketqua )\r\n \r\npage = st.sidebar.selectbox('Select 
page',['Bai01','Bai02','Bai03']) \r\nif page == 'Bai01':\r\n Bai01()\r\nelif page == 'Bai02':\r\n Bai02()\r\nelse :\r\n Bai03()\r\n","repo_name":"loctp2003/Streamlit_tutorial","sub_path":"pages/🔡KNN.py","file_name":"🔡KNN.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28195495336","text":"\"\"\"\nTests the ONS content search API\n\"\"\"\nfrom unittest import mock\n\nfrom unit.utils.search_test_app import SearchTestApp\nfrom unit.elasticsearch.elasticsearch_test_utils import mock_search_client, mock_hits_highlighted\n\nfrom dp_conceptual_search.ons.search.index import Index\nfrom dp_conceptual_search.search.search_type import SearchType\nfrom dp_conceptual_search.app.elasticsearch.elasticsearch_client_service import ElasticsearchClientService\n\n\nclass SearchDepartmentsApiTestCase(SearchTestApp):\n\n maxDiff = None\n\n @staticmethod\n def paginate():\n \"\"\"\n Calls paginate and makes some basic assertions\n :return:\n \"\"\"\n import random\n\n # Generate a random page number between 1 and 10\n current_page = random.randint(1, 10)\n\n # Generate a random page size between 11 and 20\n size = random.randint(11, 20)\n\n # Calculate correct start page number\n from_start = 0 if current_page <= 1 else (current_page - 1) * size\n\n return from_start, current_page, size\n\n @property\n def search_term(self):\n \"\"\"\n Mock search term to be used for testing\n :return:\n \"\"\"\n return \"Education\"\n\n @mock.patch.object(ElasticsearchClientService, '_init_client', mock_search_client)\n def test_content_query_search_called(self):\n \"\"\"\n Tests that the search method is called properly by the api for a content query\n :return:\n \"\"\"\n # Make the request\n # Set pagination params\n from_start, current_page, size = self.paginate()\n\n # Build params dict\n params = {\n \"q\": self.search_term,\n \"page\": current_page,\n \"size\": size\n }\n\n # URL encode\n url_encoded_params = self.url_encode(params)\n\n target = \"/search/departments?{q}\".format(q=url_encoded_params)\n\n # Make the request\n request, response = self.get(target, 200)\n\n # Build the expected query dict - note this should not change\n expected = {\n \"query\": {\n \"match\": {\n \"terms\": {\n \"query\": self.search_term,\n \"type\": \"boolean\"\n }\n }\n },\n \"from\": from_start,\n \"size\": size\n }\n\n # Assert search was called with correct arguments\n self.mock_client.search.assert_called_with(index=[Index.DEPARTMENTS.value], doc_type=[], body=expected,\n search_type=SearchType.DFS_QUERY_THEN_FETCH.value)\n\n data = response.json\n results = data['results']\n\n expected_hits_highlighted = mock_hits_highlighted()\n self.assertEqual(results, expected_hits_highlighted, \"returned hits should match expected\")\n\n\n\n","repo_name":"databill86/dp-conceptual-search","sub_path":"unit/api/search/test_search_departments.py","file_name":"test_search_departments.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13348492336","text":"import os\nimport torch\nimport random\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\nfrom torch.utils import data\nfrom torch_geometric.data import Data\nfrom torch_geometric.data import InMemoryDataset\nfrom torch_geometric.data import Batch\nfrom itertools import repeat, product, chain\nfrom collections import Counter, deque\nfrom 
networkx.algorithms.traversal.breadth_first_search import generic_bfs_edges\n\ndef nx_to_graph_data_obj(g, center_id, allowable_features_downstream=None,\n allowable_features_pretrain=None,\n node_id_to_go_labels=None):\n \"\"\"\n Converts nx graph of PPI to pytorch geometric Data object.\n :param g: nx graph object of ego graph\n :param center_id: node id of center node in the ego graph\n :param allowable_features_downstream: list of possible go function node\n features for the downstream task. The resulting go_target_downstream node\n feature vector will be in this order.\n :param allowable_features_pretrain: list of possible go function node\n features for the pretraining task. The resulting go_target_pretrain node\n feature vector will be in this order.\n :param node_id_to_go_labels: dict that maps node id to a list of its\n corresponding go labels\n :return: pytorch geometric Data object with the following attributes:\n edge_attr\n edge_index\n x\n species_id\n center_node_idx\n go_target_downstream (only if node_id_to_go_labels is not None)\n go_target_pretrain (only if node_id_to_go_labels is not None)\n \"\"\"\n n_nodes = g.number_of_nodes()\n n_edges = g.number_of_edges()\n\n # nodes\n nx_node_ids = [n_i for n_i in g.nodes()] # contains list of nx node ids\n # in a particular ordering. Will be used as a mapping to convert\n # between nx node ids and data obj node indices\n\n x = torch.tensor(np.ones(n_nodes).reshape(-1, 1), dtype=torch.float)\n # we don't have any node labels, so set to dummy 1. dim n_nodes x 1\n\n center_node_idx = nx_node_ids.index(center_id)\n center_node_idx = torch.tensor([center_node_idx], dtype=torch.long)\n\n # edges\n edges_list = []\n edge_features_list = []\n for node_1, node_2, attr_dict in g.edges(data=True):\n edge_feature = [attr_dict['w1'], attr_dict['w2'], attr_dict['w3'],\n attr_dict['w4'], attr_dict['w5'], attr_dict['w6'],\n attr_dict['w7'], 0, 0] # last 2 indicate self-loop\n # and masking\n edge_feature = np.array(edge_feature, dtype=int)\n # convert nx node ids to data obj node index\n i = nx_node_ids.index(node_1)\n j = nx_node_ids.index(node_2)\n edges_list.append((i, j))\n edge_features_list.append(edge_feature)\n edges_list.append((j, i))\n edge_features_list.append(edge_feature)\n\n # data.edge_index: Graph connectivity in COO format with shape [2, num_edges]\n edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long)\n\n # data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]\n edge_attr = torch.tensor(np.array(edge_features_list),\n dtype=torch.float)\n\n try:\n species_id = int(nx_node_ids[0].split('.')[0]) # nx node id is of the form:\n # species_id.protein_id\n species_id = torch.tensor([species_id], dtype=torch.long)\n except: # occurs when nx node id has no species id info. For the extract\n # substructure context pair transform, where we convert a data obj to\n # a nx graph obj (which does not have original node id info)\n species_id = torch.tensor([0], dtype=torch.long) # dummy species\n # id is 0\n\n # construct data obj\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)\n data.species_id = species_id\n data.center_node_idx = center_node_idx\n\n if node_id_to_go_labels: # supervised case with go node labels\n # Construct a dim n_pretrain_go_classes tensor and a\n # n_downstream_go_classes tensor for the center node. 
0 is no data\n # or negative, 1 is positive.\n downstream_go_node_feature = [0] * len(allowable_features_downstream)\n pretrain_go_node_feature = [0] * len(allowable_features_pretrain)\n if center_id in node_id_to_go_labels:\n go_labels = node_id_to_go_labels[center_id]\n # get indices of allowable_features_downstream that match with elements\n # in go_labels\n _, node_feature_indices, _ = np.intersect1d(\n allowable_features_downstream, go_labels, return_indices=True)\n for idx in node_feature_indices:\n downstream_go_node_feature[idx] = 1\n # get indices of allowable_features_pretrain that match with\n # elements in go_labels\n _, node_feature_indices, _ = np.intersect1d(\n allowable_features_pretrain, go_labels, return_indices=True)\n for idx in node_feature_indices:\n pretrain_go_node_feature[idx] = 1\n data.go_target_downstream = torch.tensor(np.array(downstream_go_node_feature),\n dtype=torch.long)\n data.go_target_pretrain = torch.tensor(np.array(pretrain_go_node_feature),\n dtype=torch.long)\n\n return data\n\ndef graph_data_obj_to_nx(data):\n \"\"\"\n Converts pytorch geometric Data obj to network x data object.\n :param data: pytorch geometric Data object\n :return: nx graph object\n \"\"\"\n G = nx.Graph()\n\n # edges\n edge_index = data.edge_index.cpu().numpy()\n edge_attr = data.edge_attr.cpu().numpy()\n n_edges = edge_index.shape[1]\n for j in range(0, n_edges, 2):\n begin_idx = int(edge_index[0, j])\n end_idx = int(edge_index[1, j])\n w1, w2, w3, w4, w5, w6, w7, _, _ = edge_attr[j].astype(bool)\n if not G.has_edge(begin_idx, end_idx):\n G.add_edge(begin_idx, end_idx, w1=w1, w2=w2, w3=w3, w4=w4, w5=w5,\n w6=w6, w7=w7)\n\n # # add center node id information in final nx graph object\n # nx.set_node_attributes(G, {data.center_node_idx.item(): True}, 'is_centre')\n\n return G\n\nclass BioDataset(InMemoryDataset):\n def __init__(self,\n root,\n data_type,\n empty=False,\n transform=None,\n pre_transform=None,\n pre_filter=None):\n \"\"\"\n Adapted from qm9.py. Disabled the download functionality\n :param root: the data directory that contains a raw and processed dir\n :param data_type: either supervised or unsupervised\n :param empty: if True, then will not load any data obj. For\n initializing empty dataset\n :param transform:\n :param pre_transform:\n :param pre_filter:\n \"\"\"\n self.root = root\n self.data_type = data_type\n\n super(BioDataset, self).__init__(root, transform, pre_transform, pre_filter)\n if not empty:\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_file_names(self):\n #raise NotImplementedError('Data is assumed to be processed')\n if self.data_type == 'supervised': # 8 labelled species\n file_name_list = ['3702', '6239', '511145', '7227', '9606', '10090', '4932', '7955']\n else: # unsupervised: 8 labelled species, and 42 top unlabelled species by n_nodes.\n file_name_list = ['3702', '6239', '511145', '7227', '9606', '10090',\n '4932', '7955', '3694', '39947', '10116', '443255', '9913', '13616',\n '3847', '4577', '8364', '9823', '9615', '9544', '9796', '3055', '7159',\n '9031', '7739', '395019', '88036', '9685', '9258', '9598', '485913',\n '44689', '9593', '7897', '31033', '749414', '59729', '536227', '4081',\n '8090', '9601', '749927', '13735', '448385', '457427', '3711', '479433',\n '479432', '28377', '9646']\n return file_name_list\n\n\n @property\n def processed_file_names(self):\n return 'geometric_data_processed.pt'\n\n def download(self):\n raise NotImplementedError('Must indicate valid location of raw data. 
'\n 'No download allowed')\n\n def process(self):\n raise NotImplementedError('Data is assumed to be processed')\n\nif __name__ == \"__main__\":\n \n\n\n root_supervised = 'dataset/supervised'\n\n d_supervised = BioDataset(root_supervised, data_type='supervised')\n\n print(d_supervised)\n\n root_unsupervised = 'dataset/unsupervised'\n d_unsupervised = BioDataset(root_unsupervised, data_type='unsupervised')\n\n print(d_unsupervised)\n\n\n","repo_name":"snap-stanford/pretrain-gnns","sub_path":"bio/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","stars":868,"dataset":"github-code","pt":"81"} +{"seq_id":"2542038726","text":"dico = {}\ndico['firstname'] = 'ethan'\ndico['name'] = 'Coulon'\ndico['promotion'] = '2022'\ndico['group'] = '202'\n\nbinome = {}\nbinome['firstname'] = 'Florian'\nbinome['name'] = 'Mercklé'\nbinome['promotion'] = '2022'\nbinome['group'] = '202'\n\ntuplets = {\n \"name\": dico['name'],\n \"firstname\": dico['firstname'],\n \"promotion\": dico['promotion'],\n \"group\": dico['group']\n}\n\ntuplets2 = {\n \"name\": binome['name'],\n \"firstname\": binome['firstname'],\n \"promotion\": binome['promotion'],\n \"group\": binome['group']\n}\n\nprint(\"les etudiants formants le binôme sont:\")\nprint(\"-L'étudiant {} {} du groupee {}\".format(dico['name'], dico['firstname'], dico['group']))\nprint(\"-L'étudiant {} {} du group {}\".format(binome['name'], binome['firstname'], binome['group']))","repo_name":"Ethan68000/TP-r1.07","sub_path":"tp4/tp4ex8.9.py","file_name":"tp4ex8.9.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15163454164","text":"import os\nimport copy\nimport json\nfrom hashlib import sha256\nfrom glob import glob\n\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom torch.utils.data import DataLoader\n\nfrom torch import nn\nfrom functools import partial\n\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\nfrom torchvision.ops import misc, feature_pyramid_network\nfrom torchvision.models import resnet\nfrom torchvision.models.detection import RetinaNet, FCOS, backbone_utils\nfrom torchvision.models.detection.retinanet import RetinaNetHead, _default_anchorgen\n\nfrom torchmetrics.detection import MeanAveragePrecision\n\n\ndef dataset_formatting(data):\n \"\"\"\n Formats data from dataset object to model compatiable data structure.\n \"\"\"\n images, targets = [], []\n \n for d in data:\n images.append(d[0])\n targets.append(d[1])\n \n return images, targets\n\nclass RegionProposalNetwork():\n \"\"\" \n Class that implments the region proposal network.\n\tAttributes:\n device [str]: The device that model is store and computations occur.\n\tMethods:\n parameters: Get model parameters.\n to: Move model and run all future computations to a spesfied device.\n save: Saves model weights to a file in a spesified directroy.\n load: Loads model weights from spesified file. 
\n fit: Trains the model on a given input, evaluating after every epoch.\n propose: Runs inference on the model given an input.\n preprocess: Tranforms images to model compatable format.\n evaluate: Evaluates the model on a given input and returns metrics.\n update_nms_thresholds: Update the non-maximum suppression thresholds.\n\t\"\"\"\n def __init__(self, \n model_type=None, \n backbone_type=None,\n load_path=None,\n trainable_backbone_layers=None, \n pretrained_backbone=True,\n progress=False, \n **kwargs):\n \"\"\"\n Initializes an instance of RegionProposalNetwork.\n\n Parameters:\n\t\t\tmodel_type [str]: String representing the desired object detection model. Not used if load_path is spesified.\n Avaiable options are 'retinanet' and 'fcos'. (Default: None)\n backbone_type [str]: String representing the desired resnet backbone for detector. Not used if load_path is spesified.\n Avaiable options are 'resnet18', 'resnet34', 'resnet50', 'resnet101', and 'resnet152'. (Default: None)\n load_path [str]: Optional path to file containing model weights to load into model. (Default: None)\n trainable_backbone_layers [int]: Intiger between 0 and 5 indicating the number of trainable layers in \n backbone. 5 means all layers are trainable. If backbone is not \n pretrained do not use this argument. If backbone is pretrained \n and argument is not spesified it defaults to 3.\n pretrained_backbone [bool]: If True the backbone is loaded with pretrained weights on imagenet dataset. (Default: True)\n iou_threshold [float]: Value in the range (0,1] which respensted the maximum detection overlap. (Default: 0.5)\n score_threshold [float]: Value in the range (0,1) which respensted the minimum detection score. (Default: 0.05)\n progress [bool]: If True the backbone pretrained weights download progress bar is displayed. (Default: False) \n\t\t\tkwargs: Dictionary of the arguments to be passed to the detector API.\n \"\"\"\n if load_path:\n self.load(load_path)\n else:\n self._model = self.__build_model(model_type, backbone_type, trainable_backbone_layers, pretrained_backbone, progress, **kwargs)\n self._model_metadata = {\"model\": model_type, \n \"backbone\": backbone_type,\n \"parameters\": kwargs} \n\n self.to(torch.device('cpu')) # defaults to CPU.\n \n def __build_model(self, model_type, backbone_type, trainable_backbone_layers=None, pretrained_backbone=True, progress=False, **kwargs):\n \"\"\"\n Builds spesified model.\n\n Reference contructor for paramemter spesifications.\n \"\"\"\n if pretrained_backbone and trainable_backbone_layers is None:\n trainable_backbone_layers = 3\n\n backbone = self.__resnet_backbone(model_type, \n backbone_type, \n trainable_backbone_layers, \n pretrained_backbone, \n progress)\n\n if model_type == \"retinanet\":\n model = self.__retinanet(backbone, **kwargs)\n elif model_type == \"fcos\":\n model = self.__fcos(backbone, **kwargs)\n else:\n raise ValueError(f\"Model type '{model_type}' is not an available model type. 
Avaiable options are 'retinanet' and 'fcos'.\")\n\n model.float()\n\n return model\n\n def __retinanet(self, backbone, **kwargs):\n \"\"\"\n Builds RetinaNet model.\n\n Parameters:\n\t\t\tbackbone [torch.nn]: Model to be used as the backbone for the RetinaNet object detector.\n kwargs: Dictionary of arguments to be passed to the RetinaNet detector API.\n API Docs: https://pytorch.org/vision/0.12/_modules/torchvision/models/detection/retinanet.html\n\n Returns:\n [torchvision.models.detection.retinanet.RetinaNet]: RetinaNet model with one object detection.\n \"\"\"\n anchor_generator = _default_anchorgen()\n\n head = RetinaNetHead(\n backbone.out_channels,\n anchor_generator.num_anchors_per_location()[0],\n num_classes=2,\n norm_layer=partial(nn.GroupNorm, 32),\n )\n\n head.regression_head._loss_type = \"giou\"\n return RetinaNet(backbone, \n num_classes=2, \n anchor_generator=anchor_generator, \n head=head,\n **kwargs)\n\n def __fcos(self, backbone, **kwargs):\n \"\"\"\n Builds FCOS model.\n\n Parameters:\n\t\t\tbackbone [torch.nn]: Model to be used as the backbone for the FCOS object detector.\n kwargs: Dictionary of arguments to be passed to the FCOS detector API.\n API Docs: https://pytorch.org/vision/main/_modules/torchvision/models/detection/fcos.html\n\n Returns:\n [torchvision.models.detection.fcos.FCOS]: FCOS model with one object detection.\n \"\"\"\n return FCOS(backbone, num_classes=2, **kwargs)\n\n def __resnet_backbone(self, object_detector, resnet_type, trainable_backbone_layers=None, pretrained_backbone=True, progress=False):\n \"\"\"\n Builds ResNet backbones. \n\n Parameters:\n\t\t\tobject_detector [str]: String representing the desired object detection model.\n Avaiable options are 'retinanet' and 'fcos'.\n resnet_type [str]: String representing the desired resnet backbone for detector.\n Avaiable options are 'resnet18', 'resnet34', 'resnet50', 'resnet101', and 'resnet152'.\n trainable_backbone_layers [int]: Intiger between 0 and 5 indicating the number of trainable layers in \n backbone. 5 means all layers are trainable. If backbone is not \n pretrained do not use this argument. If backbone is pretrained \n and argument is not spesified it defaults to 3.\n pretrained_backbone [bool]: If True the backbone is loaded with pretrained weights on imagenet dataset. (Default: True)\n progress [bool]: If True the backbone pretrained weights download progress bar is displayed. 
(Default: False) \n \n Code inspired and influenced by:\n https://github.com/pytorch/vision/blob/ce257ef78b9da0430a47d387b8e6b175ebaf94ce/torchvision/models/detection/fcos.py#L676-L769\n https://github.com/pytorch/vision/blob/ce257ef78b9da0430a47d387b8e6b175ebaf94ce/torchvision/models/detection/retinanet.py#L826-L895\n \"\"\"\n if pretrained_backbone and trainable_backbone_layers is None:\n trainable_backbone_layers = 3\n\n if resnet_type == \"resnet18\":\n weights = resnet.ResNet18_Weights.IMAGENET1K_V1\n channel_out = 512\n backbone = resnet.resnet18\n elif resnet_type == \"resnet34\":\n weights = resnet.ResNet34_Weights.IMAGENET1K_V1\n channel_out = 512\n backbone = resnet.resnet34\n elif resnet_type == \"resnet50\":\n weights = resnet.ResNet50_Weights.IMAGENET1K_V2\n channel_out = 2048\n backbone = resnet.resnet50\n elif resnet_type == \"resnet101\":\n weights = resnet.ResNet101_Weights.IMAGENET1K_V2\n channel_out = 2048\n backbone = resnet.resnet101\n elif resnet_type == \"resnet152\":\n weights = resnet.ResNet152_Weights.IMAGENET1K_V2\n channel_out = 2048\n backbone = resnet.resnet152\n else:\n raise ValueError(\"The provided resnet type does is not supported. Avaiable options are 'resnet18', 'resnet34', 'resnet50', 'resnet101', and 'resnet152'.\")\n \n backbone_weights = None\n if pretrained_backbone:\n backbone_weights = weights\n\n trainable_backbone_layers = backbone_utils._validate_trainable_layers(pretrained_backbone, trainable_backbone_layers, 5, 3)\n\n if object_detector == \"retinanet\":\n backbone = backbone(weights=backbone_weights, progress=progress)\n extra_block = feature_pyramid_network.LastLevelP6P7(channel_out, 256)\n elif object_detector == \"fcos\":\n norm_layer = misc.FrozenBatchNorm2d if pretrained_backbone else nn.BatchNorm2d\n backbone = backbone(weights=backbone_weights, progress=progress, norm_layer=norm_layer)\n extra_block = feature_pyramid_network.LastLevelP6P7(256, 256)\n else:\n raise ValueError(\"The provided object detector type is not supported. Avaiable options are retinanet and fcos.\")\n\n backbone = backbone_utils._resnet_fpn_extractor(\n backbone, trainable_backbone_layers, returned_layers=[2, 3, 4], extra_blocks=extra_block)\n\n return backbone\n\n def __build_dataloader(self, dataset, shuffle=False, batch_size=1, num_workers=0):\n \"\"\"\n Creates dataloader for a dataset.\n\n Parameters:\n dataset [torch.utils.data.Dataset]: \n shuffle [bool]: Shuffle data while loading. (Default: False)\n batch_size [int]: Number of images to load in each batch. (Default: 1)\n num_workers [int]: Number of workers to use for dataloading. 
(Default: 0)\n \n Returns:\n [torch.utils.data.DataLoader]: Dataloader object for the provided dataset.\n \"\"\"\n return DataLoader(dataset, \n batch_size=batch_size, \n shuffle=shuffle, \n num_workers=num_workers,\n collate_fn=dataset_formatting,\n pin_memory=True,\n persistent_workers=num_workers>0)\n\n def parameters(self):\n \"\"\"\n Returns the model parameters.\n\n Returns:\n [generator]: Generator containing torch.nn.parameter.Parameter objects for the detector.\n \"\"\"\n return self._model.parameters()\n\n def to(self, device):\n \"\"\"\n Loads and performs computations on the model and input data to specified device.\n\n Parameters:\n device [str]: Name of the device to load model too.\n \"\"\"\n try:\n self._model.to(device)\n self.device = device\n except Exception as e:\n raise Exception(e) \n\n def save(self, save_path):\n \"\"\"\n Save the model to specified path.\n \n Parameters:\n save_path [str]: Path to directory to save model files too.\n\n Returns:\n [str]: Path to the folder the model files were saved too.\n \"\"\" \n save_name = save_path.split(os.path.sep)[-1]\n\n if not os.path.isdir(save_path):\n os.makedirs(save_path)\n\n # save model weights.\n model_weights = self._model.state_dict()\n torch.save(model_weights, os.path.join(save_path, f\"{save_name}_weights.pth\"))\n \n # save model metadata.\n self._model_metadata[\"weight_hash\"] = sha256(str(model_weights).encode()).hexdigest()\n with open(os.path.join(save_path, f\"{save_name}_metadata.json\"), \"w\") as f:\n json.dump(self._model_metadata, f, indent=1)\n \n return save_path\n\n def load(self, load_path):\n \"\"\"\n Load the model from spesified path.\n\n Parameters:\n load_path [str]: Path to folder to load model from. Folder must contain both the .json metadata and .pth weight file.\n \"\"\"\n try:\n metadata_path = glob(os.path.join(load_path, \"*.json\"))[0]\n with open(metadata_path, \"r\") as f:\n self._model_metadata = json.load(f)\n except:\n raise Exception(f\"Encountered error while extracting model metadata from {load_path}.\")\n\n self._model = self.__build_model(self._model_metadata[\"model\"],\n self._model_metadata[\"backbone\"], \n **self._model_metadata[\"parameters\"])\n try:\n weight_path = glob(os.path.join(load_path, \"*.pth\"))[0]\n model_weights = torch.load(weight_path)\n \n if sha256(str(model_weights).encode()).hexdigest() != self._model_metadata[\"weight_hash\"]:\n raise Exception(\"Model weights and metadata is not for the same model.\")\n\n self._model.load_state_dict(model_weights)\n except:\n raise Exception(f\"Encountered error while loading model weights from {load_path}.\")\n\n def fit(self, epochs, datasets, batch_size, optimizer, save_path, checkpoints=0, progress=True, num_workers=0):\n \"\"\"\n Fits the model to the training dataset and evaluates on the validation dataset.\n\n Parameters:\n epochs [int]: Number of training iterations over the data.\n datasets [tuple]: A tuple containing the training dataset as the first element and the validation dataset \n as the second element.\n batch_size [int]: Number of images to batch for training and evaluaton.\n optimizer [torch.optim|tuple]: Either an optimizer or a tuple contain a learning rate scheduler as the first element \n and an optimizer as the second element.\n save_path [str]: Path to save model checkpoints.\n checkpoints [int]: Integer N reprisenting after every N epochs to create a model checkpoint. If 0, only save the best model. (Default: 0)\n progress [bool]: Report training progress. 
(Default: True)\n            num_workers [int]: Number of workers to use for DataLoaders. (Default: 0)\n\n        Returns:\n            [dict]: Dictionary of model training history.\n        \"\"\"\n        if not os.path.isdir(save_path):\n            os.makedirs(save_path)\n\n        train_dataset, valid_dataset = datasets\n\n        train_loader = self.__build_dataloader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=num_workers)\n        valid_loader = self.__build_dataloader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=num_workers)\n\n        sched, optim = optimizer if isinstance(optimizer, tuple) else (None, optimizer)\n\n        train_hist = {\n            \"train_loss\": [],\n            \"train_map\": [],\n            \"train_map_50\": [],\n            \"train_map_75\": [],\n            \"valid_map\": [],\n            \"valid_map_50\": [],\n            \"valid_map_75\": []\n        }\n\n        best_acc = -np.inf\n        best_model_wts = None\n\n        for e in range(epochs):\n            self._model.train()\n\n            if progress:\n                print(f\"Epoch: {e+1}\")\n\n                train_e_loader = tqdm(train_loader)\n                train_e_loader.set_description(desc=f\"Training loss: {np.nan}\")\n            else:\n                train_e_loader = train_loader\n\n            loss = 0\n            for i, (X, y) in enumerate(train_e_loader):\n                X = [x.to(self.device) for x in X]\n                y = [{\"boxes\": t.to(self.device), \"labels\": torch.ones(len(t), dtype=torch.int64).to(self.device)} for t in y]\n\n                loss_dict = self._model(X, y)\n                batch_loss = sum(b_loss for b_loss in loss_dict.values())\n                loss += batch_loss.item()\n\n                optim.zero_grad()\n                batch_loss.backward()\n                optim.step()  # fixed: step the unpacked optim, since the optimizer argument may be a (scheduler, optimizer) tuple\n\n                if progress:\n                    train_e_loader.set_description(desc=f\"Training loss: {loss/(i+1):.4f}\")\n\n            train_metrics = self.evaluate(train_loader, batch_size, progress=progress, num_workers=num_workers)\n            valid_metrics = self.evaluate(valid_loader, batch_size, progress=progress, num_workers=num_workers)\n\n            if valid_metrics[\"map\"] > best_acc:\n                best_acc = valid_metrics[\"map\"]\n                best_model_wts = copy.deepcopy(self._model.state_dict())\n\n            train_hist[\"train_loss\"].append(loss/len(train_e_loader))\n            train_hist[\"train_map\"].append(train_metrics[\"map\"])\n            train_hist[\"train_map_50\"].append(train_metrics[\"map_50\"])\n            train_hist[\"train_map_75\"].append(train_metrics[\"map_75\"])\n\n            train_hist[\"valid_map\"].append(valid_metrics[\"map\"])\n            train_hist[\"valid_map_50\"].append(valid_metrics[\"map_50\"])\n            train_hist[\"valid_map_75\"].append(valid_metrics[\"map_75\"])\n\n            if progress:\n                print(\"Training Results:\")\n                print(f\"\\tmAP@.50::.05::.95 - {train_hist['train_map'][-1]}\")\n                print(f\"\\tmAP@.50 - {train_hist['train_map_50'][-1]}\")\n                print(f\"\\tmAP@.75 - {train_hist['train_map_75'][-1]}\")\n                print(\"Validation Results:\")\n                print(f\"\\tmAP@.50::.05::.95 - {train_hist['valid_map'][-1]}\")\n                print(f\"\\tmAP@.50 - {train_hist['valid_map_50'][-1]}\")\n                print(f\"\\tmAP@.75 - {train_hist['valid_map_75'][-1]}\")\n\n            if checkpoints > 0:\n                if e % checkpoints == 0:\n                    self.save(os.path.join(save_path, f\"checkpoint_{e+1}\"))\n\n            if sched:\n                sched.step()\n\n        self._model.load_state_dict(best_model_wts)\n        file_path = self.save(os.path.join(save_path, \"best_model\"))\n\n        if progress:\n            print(f\"Saved best model to '{file_path}' which achieved a validation mAP@.5:.05:.95 of {best_acc:.4f}.\")\n\n        return train_hist\n\n    def propose(self, X):\n        \"\"\"\n        Proposes regions on the input data.
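Runs the detector in eval mode with gradient tracking disabled via torch.no_grad().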
\n\n Parameters:\n X [list|torch.Tensor]: Either a list of tensor objects or a single tensor of shape [N, C, H, W].\n\n Returns:\n [list]: List of dictionaries with region proposals for each image in X.\n \"\"\"\n self._model.eval()\n with torch.no_grad():\n X = [x.to(self.device) for x in X]\n y_hats = self._model(X)\n return y_hats\n\n def preprocess(self, image_paths):\n \"\"\"\n Preprocesses a list of images paths to a list of tensors that are compatable with the model.\n\n Paramemeters:\n image_paths [list]: List of image paths.\n\n Return:\n [list]: A list of image tensors loaded to the same device as the model.\n \"\"\"\n images = []\n\n transform = transforms.ToTensor()\n for image_path in image_paths:\n image = Image.open(image_path)\n image = transform(image)\n image.to(self.device)\n images.append(image)\n \n return images\n\n def evaluate(self, dataset, batch_size=1, num_workers=0, progress=False):\n \"\"\"\n Evaluates the model on a dataset.\n\n Parameters:\n dataset [torch.utils.data.Dataset|torch.utils.data.DataLoader]: Dataset or dataloader to use for model evaluation.\n batch_size [int]: Number of images to batch for each evaluaton. (Default: 1)\n progress [bool]: Report evaluation progress. (Default: True)\n num_workers [int]: Number of workers to use for DataLoaders. (Default: 0)\n\n Returns:\n [dict]: Dictionary of evaluation results.\n \"\"\"\n self._model.eval()\n\n if isinstance(dataset, torch.utils.data.Dataset):\n dataloader = self.__build_dataloader(dataset, shuffle=False, batch_size=batch_size, num_workers=num_workers)\n else:\n dataloader = dataset\n\n if progress:\n dataloader = tqdm(dataloader)\n\n metrics = MeanAveragePrecision()\n with torch.no_grad():\n for X, y in dataloader:\n X = [x.to(self.device) for x in X]\n y = [{\"boxes\": t.to(self.device), \"labels\": torch.ones(len(t), dtype=torch.int64).to(self.device)} for t in y]\n \n y_hats = self.propose(X)\n metrics.update(y_hats, y)\n\n return {k: v.item() for k, v in metrics.compute().items()}\n\n def update_nms_thresholds(self, iou_threshold, score_threshold):\n \"\"\"\n Sets model non-maximum suppression thresholds.\n\n Parameters:\n iou_threshold [float]: Intersection over union threshold.\n score_threshold [float]: Model detection score threshold.\n \"\"\"\n self._model.nms_thresh = iou_threshold\n self._model.score_thresh = score_threshold\n \n self._model_metadata[\"parameters\"][\"nms_thresh\"] = iou_threshold\n self._model_metadata[\"parameters\"][\"score_thresh\"] = score_threshold\n","repo_name":"ellinj2/Face_Anomaly_Detection","sub_path":"adifi/region_proposal/region_proposal_network.py","file_name":"region_proposal_network.py","file_ext":"py","file_size_in_byte":22411,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"31177400769","text":"from rest_framework import serializers\n\nfrom .models import CtsResult, Job, LitResult\n\n\nclass JobSerializer(serializers.ModelSerializer):\n revision_hash = serializers.CharField(source=\"revision.hash\", read_only=True)\n\n class Meta:\n model = Job\n fields = (\n \"pk\",\n \"status\",\n \"revision_hash\",\n \"run_lit_all\",\n \"run_cts_allocations\",\n \"run_cts_api\",\n \"run_cts_basic\",\n \"run_cts_atomics\",\n \"run_cts_buffers\",\n \"run_cts_commonfns\",\n \"run_cts_compiler\",\n \"run_cts_computeinfo\",\n \"run_cts_contractions\",\n \"run_cts_device_partition\",\n \"run_cts_events\",\n \"run_cts_geometrics\",\n \"run_cts_half\",\n \"run_cts_integer_ops\",\n \"run_cts_mem_host_flags\",\n 
\"run_cts_multiple_device_context\",\n \"run_cts_printf\",\n \"run_cts_profiling\",\n \"run_cts_relationals\",\n \"run_cts_select\",\n \"run_cts_thread_dimensions\",\n \"run_cts_vectors\",\n \"run_cts_c11_atomics\",\n \"run_cts_device_execution\",\n \"run_cts_non_uniform_work_group\",\n \"run_cts_generic_address_space\",\n \"run_cts_subgroups\",\n \"run_cts_workgroups\",\n \"run_cts_pipes\",\n \"run_cts_device_timer\",\n \"run_cts_spirv_new\",\n \"run_cts_math_brute_force\",\n \"run_cts_SVM\",\n \"run_cts_clCopyImage\",\n \"run_cts_clFillImage\",\n \"run_cts_clGetInfo\",\n \"run_cts_clReadWriteImage\",\n \"run_cts_kernel_image_methods\",\n \"run_cts_kernel_read_write\",\n \"run_cts_samplerlessReads\",\n )\n\n\nclass JobDeserializer(serializers.ModelSerializer):\n class Meta:\n model = Job\n fields = (\"pk\", \"status\", \"status_details\")\n\n\nclass LitResultSerializer(serializers.ModelSerializer):\n class Meta:\n model = LitResult\n fields = (\"test_path\", \"passing\")\n\n\nclass CtsResultSerializer(serializers.ModelSerializer):\n class Meta:\n model = CtsResult\n fields = (\n \"test_category\",\n \"test_name\",\n \"passing\",\n \"timedout\",\n \"start_time\",\n \"end_time\",\n \"standard_output\",\n \"standard_error\",\n \"test_executable\",\n \"test_arguments\",\n \"suite_version\",\n \"igc_version\",\n \"neo_version\",\n \"dump\",\n )\n","repo_name":"KhronosGroup/SPIRV-Backend-Testing","sub_path":"website/dispatcher/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"5043822206","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\ndef main():\n setuptools.setup(name=\"hoap\",\n version=\"0.1\",\n description=\"A Python/Hy module to with pointer-like structures and memory heap.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Atell Krasnopolski\",\n url=\"https://github.com/gojakuch/hoap\",\n packages=setuptools.find_packages(),\n license=\"MIT License\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n ext_modules=[setuptools.Extension(\"hoap\", [\"hoap.c\"])])\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gojakuch/hoap","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2542700387","text":"from flask import Flask, render_template, request\nimport time\n\napp = Flask(__name__)\n\ndef quicksort(arr):\n if len(arr) <= 1:\n return arr\n else:\n pivot = arr[0]\n less = [x for x in arr[1:] if x <= pivot]\n greater = [x for x in arr[1:] if x > pivot]\n return quicksort(less) + [pivot] + quicksort(greater)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n arraySortida = None\n tempoTomado = None\n\n if request.method == 'POST':\n input_array = request.form['input_array']\n try:\n input_array = [int(x) for x in input_array.split()]\n start_time = time.time()\n # time.sleep(1) #-> Descomentar para testar tempo caso necessario sleep é para \"dormir\" o tempo para provar q existe a execução levando em cosideração q seu tempo é em segundos\n arraySortida = quicksort(input_array.copy())\n end_time = time.time()\n tempoTomado = (end_time - start_time) * 1000 # Convert to milliseconds\n 
except ValueError:\n # error_message = \"Por favor, insira apenas números inteiros separados por espaços.\" #-> Não necessita pois existe validação no front para evitar esse problema\n # return render_template('index.html', error_message=error_message) #-> Mesma coisa pra linha de cima\n return render_template('index.html')\n\n return render_template('index.html', arraySortida=arraySortida, tempoTomado=tempoTomado)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Colombao/PythonFlask","sub_path":"quickSort.py","file_name":"quickSort.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32933026498","text":"'''\nAuthor : Zhu Honglin\nDate : 2020-09-13 19:39:31\nLastEditTime : 2020-09-13 20:09:52\n'''\n\nfrom typing import List\nimport collections\n\nclass Solution:\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n queue = collections.deque()\n rst = []\n\n for i in range(k-1):\n while len(queue) > 0 and nums[queue[-1]] < nums[i]:\n queue.pop()\n queue.append(i)\n \n for i in range(k-1, len(nums)):\n while len(queue) > 0 and queue[0] <= i-k:\n queue.popleft()\n\n while len(queue) > 0 and nums[queue[-1]] < nums[i]:\n queue.pop()\n \n queue.append(i)\n rst.append(nums[queue[0]])\n \n return rst","repo_name":"imzhuhl/algorithm-notes","sub_path":"exercise/leetcode/0239/0239.py","file_name":"0239.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42867228855","text":"import sys\r\nfrom collections import Counter\r\ninput = sys.stdin.readline\r\n\r\nn, m, b = map(int, input().split())\r\ndata = []\r\nfor _ in range(n):\r\n data.extend(list(map(int,input().split())))\r\ndata = Counter(data)\r\nh = 0\r\nt = 0\r\n\r\nans_t = 1e9\r\nans_h = 0\r\nwhile h <= 256:\r\n c1 = c2 = 0\r\n for k, v in data.items():\r\n if k < h:\r\n c1 += (h-k) * v\r\n elif k > h:\r\n c2 += (k-h) * v\r\n if (b - c1 + c2) < 0:\r\n break\r\n t = c1 + 2*c2\r\n if t <= ans_t:\r\n ans_t = t\r\n ans_h = h\r\n h += 1\r\n else:\r\n break\r\nprint(ans_t, ans_h)\r\n","repo_name":"iblug/Baekjoon","sub_path":"백준/Silver/18111. 
마인크래프트/마인크래프트.py","file_name":"마인크래프트.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33901098314","text":"#!/usr/bin/env python3\n\nimport cherrypy\nimport os\n\nclass app():\n \n @cherrypy.expose\n def index(self):\n return open(\"thing.html\")\n\nif __name__ == \"__main__\":\n\n application = app()\n\n ABSPATH = os.path.abspath(os.getcwd())\n\n injectedConfig = dict()\n\n injectedConfig[\"/\"] = { \"tools.staticdir.on\": True, \"tools.staticdir.dir\": ABSPATH }\n\n cherrypy.quickstart(application, \"/\", injectedConfig)\n","repo_name":"mason-1009/pi-door","sub_path":"src/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72849692106","text":"'''\ninput output 첫째 줄 : 노드의 개수, 간선의 개수\n6 11 0 둘째 줄 : 시작 노드 번호\n1 2 세번째 줄부터 : 각 노드에 연결되어 있는 노드에 대한 정보 \n1 2 2 3 (1 2 2) : 1번 노드에서 2번 노드로 가는 비용이 2라는 의미\n1 3 5 1\n1 4 1 2\n2 3 3 4\n2 4 2\n3 2 3\n3 6 5\n4 3 3\n4 5 1\n5 3 1\n5 6 2\n'''\n\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\n# 노드의 개수\nn,m = map(int, input().split())\nstart = int(input())\n\n# 각 노드에 연결되어 있는 노드에 대한 정보를 담는 리스트를 만들기\ngraph = [ [] for _ in range(n+1)] # n+1을 하는 이유는 각 노드에 숫자에 맞게 할당하기 위해서\n# 방문한 적 있는지 체크하는 리스트\nvisited = [False] * (n + 1)\n# 최단 거리 테이블을 모두 무한으로 초기화\ndistance = [INF] * (n + 1)\n\n# 모든 간선에 대한 정보 입력받기\nfor i in range(m) :\n a, b, c = map(int, input().split())\n graph[a].append((b,c))\n\ndef get_smallest_node() :\n min_value = INF\n index = 0\n for i in range(1, n+1) :\n if distance[i] < min_value and not visited[i] :\n min_value = distance[i]\n index = i\n\n return index\n\ndef dijkstra(start) :\n\n distance[start] = 0\n visited[start] = True\n for i in graph[start] :\n distance[i[0]] = i[1]\n\n # 시작 노드를 제외한 전체 n-1개의 노드에 대해 반복\n for i in range(n-1) :\n index = get_smallest_node()\n visited[index] = True\n\n\n for j in graph[index] :\n cost = distance[index] + j[1]\n if cost < distance[j[0]] :\n distance[j[0]] = cost\n\n\ndijkstra(start)\n\nfor i in range(1, n+1) :\n if distance[i] == INF :\n print(\"INF\")\n else : print(distance[i])\n\n'''\n시간 복잡도 O(v^2) 단, v는 노드 개수\n전체 노드의 수가 5천개 이하라면 일반적으로 이 코드로 해결 가능\n하지만, 노드 개수가 1만개를 넘어서면 이 코드로는 문제 해결이 어렵다.\n'''","repo_name":"KymCat/CodingTestStudy","sub_path":"알고리즘/dijkstra_list.py","file_name":"dijkstra_list.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25661432032","text":"from tkinter import *\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport threading\nimport pandas as pd\nimport serial\n\nnome='data'\ntimestamp=time.time()\nroot = Tk()\nroot.title(\"Solar cell power measurement\")\n\nl=np.zeros(13)\nt=0\nh=0\ncal=np.array([0,0,0,0])\ncal0=0\nser=serial.Serial('/dev/ttyACM0',115200,timeout=20)\n\ndef clock():\n global timestamp,t,l\n seconds=time.time()-timestamp\n horas=int(seconds/3600)\n minutos=int(seconds/60-horas*60)\n segundos=int((seconds/60-int(seconds/60))*60)\n tempo.config(text='{:02.0f}'.format(horas)+\":\"+'{:02.0f}'.format(minutos)+\":\"+'{:02.0f}'.format(segundos))\n tempo.after(1000,clock)\n calib.config(text='{:03.1f}'.format(l[0]-cal0))\n v1.config(text='{:04.0f}'.format(l[2]))\n v2.config(text='{:04.0f}'.format(l[4]))\n v3.config(text='{:04.0f}'.format(l[6]))\n v4.config(text='{:04.0f}'.format(l[8]))\n 
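# current readouts (mA) for the four cells, then the computed powers (mW)\r\n    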
a1.config(text='{:04.0f}'.format(l[1]))\n a2.config(text='{:04.0f}'.format(l[3]))\n a3.config(text='{:04.0f}'.format(l[5]))\n a4.config(text='{:04.0f}'.format(l[7]))\n p1.config(text='{:05.1f}'.format(l[9]))\n p2.config(text='{:05.1f}'.format(l[10]))\n p3.config(text='{:05.1f}'.format(l[11]))\n p4.config(text='{:05.1f}'.format(l[12]))\n temperatura.config(text=t)\n humidity.config(text=h)\n \ndef save():\n global pandas,nome\n nome=filename.get()\n pandas.to_csv(\"~/\"+nome+\"-PV.csv\")\n\ndef zerar():\n global timestamp,pandas,leitura\n l=np.zeros(13)\n timestamp=time.time()\n pandas=pd.DataFrame([[pd.Timestamp.now(),t,h,0,0,0,0,0,0,0,0,0,0,0,0]],columns=['datetime','t','rh','v1','a1','p1','v2','a2','p2','v3','a3','p3','v4','a4','p4'])\n \ndef leserial():\n global pandas,l,t,h,leitura,cal0\n time.sleep(1)\n ser.flush()\n ser.flushInput()\n ser.flushOutput()\n while True:\n if ser.in_waiting>0:\n leitura=np.array(ser.readline().decode().rstrip().split(' ')).astype(float)\n t=0\n h=0\n i=1\n leitura=leitura*0.8056\n if cal[0]==0:\n cal0=leitura[0]\n cal[0]=leitura[1]-leitura[0]\n cal[1]=leitura[3]-leitura[0]\n cal[2]=leitura[5]-leitura[0]\n cal[3]=leitura[7]-leitura[0]\n while i<9:\n l[i]=leitura[i]-leitura[0]\n i+=2\n i=2\n while i<9:\n l[i]=leitura[i]\n i+=2\n l[0]=leitura[0]\n l[1]=(l[1]-cal[0])/0.9\n l[3]=(l[3]-cal[1])/0.9\n l[5]=(l[5]-cal[2])/0.9\n l[7]=(l[7]-cal[3])/0.9\n l[9]=l[1]*l[2]/1000\n l[10]=l[3]*l[4]/1000\n l[11]=l[5]*l[6]/1000\n l[12]=l[7]*l[8]/1000\n pandas=pandas.append(pd.DataFrame([[pd.Timestamp.now(),t,h,l[2],l[1],l[9],l[4],l[3],l[10],l[6],l[5],l[11],l[8],l[7],l[12]]],columns=['datetime','t','rh','v1','a1','p1','v2','a2','p2','v3','a3','p3','v4','a4','p4']),ignore_index=True)\n \ndef graph():\n fig, ax=plt.subplots()\n p1,=ax.plot(pandas.set_index('datetime')['p1'],label='Cell 1')\n p2,=ax.plot(pandas.set_index('datetime')['p2'], label='Cell 2')\n p3,=ax.plot(pandas.set_index('datetime')['p3'], label='Cell 3')\n p4,=ax.plot(pandas.set_index('datetime')['p4'], label='Cell 4')\n ax.legend()\n ax.set_ylabel('Power (mW)')\n ax.tick_params('x',labelrotation=45)\n plt.tight_layout()\n plt.show()\n \nLabel(root,text=\"Temperature (°C)\").grid(row=0,column=1)\ntemperatura=Label(root,text=\"\")\ntemperatura.grid(row=1,column=1)\n\nLabel(root,text=\"VCC Cal\").grid(row=0,column=3)\ncalib=Label(root,text=\"\")\ncalib.grid(row=1,column=3)\n\nLabel(root,text=\"RH (%)\").grid(row=0,column=2)\nhumidity=Label(root,text=\"\")\nhumidity.grid(row=1,column=2)\n\nLabel(root,text=\"Elapsed time\").grid(row=0,column=0)\ntempo=Label(root,text=\"\",fg=\"green\")\ntempo.grid(row=1,column=0)\n\nLabel(root,text=\"Voltage(mV)\",fg=\"blue\").grid(row=3,column=1)\nLabel(root,text=\"Current(mA)\",fg=\"red\").grid(row=3,column=2)\nLabel(root,text=\"Power(mW)\",fg=\"red3\").grid(row=3,column=3)\n\nLabel(root,text=\"Cell 1:\").grid(row=4,column=0)\nv1=Label(root,text=\"0\",fg=\"blue\")\nv1.grid(row=4,column=1)\na1=Label(root,text=\"0\",fg=\"red\")\na1.grid(row=4,column=2)\np1=Label(root,text=\"0\",fg=\"red3\")\np1.grid(row=4,column=3)\n\nLabel(root,text=\"Cell 2:\").grid(row=5,column=0)\nv2=Label(root,text=\"0\",fg=\"blue\")\nv2.grid(row=5,column=1)\na2=Label(root,text=\"0\",fg=\"red\")\na2.grid(row=5,column=2)\np2=Label(root,text=\"0\",fg=\"red3\")\np2.grid(row=5,column=3)\n\nLabel(root,text=\"Cell 
3:\").grid(row=6,column=0)\nv3=Label(root,text=\"0\",fg=\"blue\")\nv3.grid(row=6,column=1)\na3=Label(root,text=\"0\",fg=\"red\")\na3.grid(row=6,column=2)\np3=Label(root,text=\"0\",fg=\"red3\")\np3.grid(row=6,column=3)\n\nLabel(root,text=\"Cell 4:\").grid(row=7,column=0)\nv4=Label(root,text=\"0\",fg=\"blue\")\nv4.grid(row=7,column=1)\na4=Label(root,text=\"0\",fg=\"red\")\na4.grid(row=7,column=2)\np4=Label(root,text=\"0\",fg=\"red3\")\np4.grid(row=7,column=3)\n\nLabel(root,text=\"Insert the filename\").grid(row=8,column=1)\nLabel(root,text=\"~/\").grid(row=9,column=0)\n\nfilename=Entry(root,width=20)\nfilename.grid(row=9,column=1)\nfilename.insert(0,nome)\n\nButton(root,text=\"Graph: Power\",command=graph).grid(row=10,column=0)\n\nButton(root,text=\"Save data\",command=save).grid(row=10,column=1)\n\nButton(root,text=\"Restart Measurement\",command=zerar).grid(row=10,column=2)\n\nclock()\npandas=pd.DataFrame([[pd.Timestamp.now(),t,h,0,0,0,0,0,0,0,0,0,0,0,0]],columns=['datetime','t','rh','v1','a1','p1','v2','a2','p2','v3','a3','p3','v4','a4','p4'])\nt1=threading.Thread(target=leserial)\nt1.start()\n\nroot.mainloop()\n","repo_name":"mbelancon/SolarPower","sub_path":"pv-stm32.py","file_name":"pv-stm32.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17835544237","text":"import sys\r\nimport logging\r\nimport os\r\nimport os.path\r\nimport codecs\r\nimport json\r\nimport datetime\r\nimport config\r\n\r\nimport webserver.www as www\r\nimport webserver.templater as templater\r\nimport db\r\n\r\n\r\n\r\n\r\n## register items\r\n\r\n\r\nclass RuleList_Handler(www.Handler_Base):\r\n def __init__(self):\r\n www.Handler_Base.__init__(self)\r\n\r\n def Path(self):\r\n return ('/rules/list')\r\n \r\n def Dispatch(self, opts, request, args={}):\r\n\r\n \r\n \r\n rules = db.RuleService().LoadAll()\r\n \r\n request.send_response(200) # OK\r\n request.send_header('Content-type', self.ContentType())\r\n request.end_headers()\r\n \r\n tplmgr = templater.Templater(opts.rootdir)\r\n \r\n html = \"\"\r\n for rule in rules:\r\n \r\n fragment = tplmgr.GenerateFragment(\"listentry\", [ { 'tag': 'rule', 'obj': rule } ], subdir='rules')\r\n html += fragment\r\n\r\n g = config.O()\r\n g.body = html\r\n html = tplmgr.GenerateFragment(\"listwrapper\", [ { 'tag': 'wrapper', 'obj': g } ], subdir='rules')\r\n \r\n g = config.O()\r\n g.page_title = config.www.generic_title + \"Rule List\"\r\n g.body = html\r\n g.title = \"Rule List\"\r\n\r\n fdata = tplmgr.Generate(\"generic\", [ { 'tag': 'generic', 'obj': g } ] ) \r\n request.wfile.write(fdata.encode('utf-8'))\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef RegisterHandlers( handlers={} ):\r\n \r\n h = RuleList_Handler()\r\n handlers[h.Path()] = h\r\n \r\n\r\n \r\n return handlers\r\n","repo_name":"juanmcasillas/CatFeederPrototype","sub_path":"python/webserver/handlers/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"25305604128","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 4 01:59:31 2020\n\n@author: Cheerag\n\"\"\"\n\n\ndef trailingZero(n):\n i=1\n flag = True\n result = 0\n while flag == True:\n ans = n//(5**i)\n if ans == 0:\n flag = False \n result +=ans\n #print(result)\n i+=1\n \n return result\n\n#n = 
int(input())\nprint(trailingZero(50))\n","repo_name":"iamcheerag/Machine-Learning-Ninjas","sub_path":"Conditions loops and Functions/Assignment/Trailing zero in n!.py","file_name":"Trailing zero in n!.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
+{"seq_id":"27253796270","text":"from torch.utils.data import Dataset\r\nimport os\r\nfrom PIL import Image\r\nfrom torchvision import transforms\r\nimport numpy as np\r\nimport torch\r\n\r\n\r\nclass face_dataset(Dataset):\r\n    def __init__(self, path, size):\r\n        self.path = path\r\n        self.size = size\r\n        self.dataset = []\r\n        self.dataset.extend(open(os.path.join(self.path, str(self.size), \"positive.txt\")).readlines())\r\n        self.dataset.extend(open(os.path.join(self.path, str(self.size), \"negative.txt\")).readlines())\r\n        self.dataset.extend(open(os.path.join(self.path, str(self.size), \"part.txt\")).readlines())\r\n\r\n    def __getitem__(self, index):\r\n        lines = self.dataset[index]  # fixed: was self.dataset[0], which always returned the first sample\r\n        line = lines.strip().split(\" \")\r\n        line = list(filter(lambda x: bool(x), line))\r\n        image_name = line[0]\r\n        img_path = os.path.join(self.path, str(self.size), image_name)\r\n        image = np.array(Image.open(img_path))\r\n        # normalize to [-0.5, 0.5] and move channels first (HWC -> CHW)\r\n        img = torch.Tensor((image / 255 - 0.5).transpose([2, 0, 1]))\r\n\r\n        # transform = transforms.Compose(\r\n        #     transforms.ToTensor()\r\n        # )\r\n        # img = transform(image)\r\n        cond = torch.Tensor([int(line[-1])])\r\n        offset = torch.Tensor(list(map(float, line[1:-1])))\r\n\r\n        return img, cond, offset\r\n\r\n    def __len__(self):\r\n        return len(self.dataset)\r\n\r\n\r\nif __name__ == '__main__':\r\n    path = r\"E:\\celeba\\train_dataset\"\r\n    img_size = 12\r\n    data = face_dataset(path, img_size)\r\n    img, cond, offset = data.__getitem__(0)\r\n\r\n    print(img.shape)\r\n    print(cond)\r\n    print(offset)\r\n","repo_name":"harrywellington9588/FACE-DETECTION","sub_path":"my_program/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"72849692106","text":"import socket\r\nimport threading\r\nimport time\r\n\r\nHEADER = 64 # fixed 64-byte header that tells the server, in advance, how long the incoming message is\r\nPORT = 8000\r\nFORMAT = 'utf-8'\r\nDISCONNECT_MESSAGE = \"!DISCONNECT\" # message sent to the server just before disconnecting\r\nSERVER = \"192.168.0.21\" # server IP\r\nADDR = (SERVER, PORT)\r\n\r\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # client socket; the socket() arguments select the address family and protocol\r\nclient.connect(ADDR)\r\n\r\nconnected = True # stays True for as long as we are connected\r\n\r\ndef send(msg): # sends a message to the server\r\n    message = msg.encode(FORMAT)\r\n    msgLength = len(message) # the message length is announced first, as noted above\r\n    sendLength = str(msgLength).encode(FORMAT)\r\n    sendLength = (b' ' * (HEADER - len(sendLength))) + sendLength # pad with spaces up to the fixed 64-byte header size\r\n    client.send(sendLength) # send the header (the message length) first; the server returns no acknowledgement for it\r\n    client.send(message) # then send the message itself\r\n\r\n\r\ndef receive(): # listens to the server continuously\r\n    while (connected): # for as long as we are connected\r\n        msgLength = client.recv(HEADER).decode(FORMAT)\r\n        if msgLength: # if header info arrived, a message will follow\r\n            msgLength = int(msgLength)\r\n            msg = client.recv(msgLength).decode(FORMAT) # receive the message\r\n            print(msg) # print it to the screen\r\n\r\ndef start(): # create a thread bound to receive() so the server is listened to from startup\r\n    receiveThread = threading.Thread(target = receive, args = ())\r\n    receiveThread.start()\r\n\r\n\r\nstart()\r\nwhile(True): # keep waiting for user input\r\n    msg = input()\r\n    if msg == \"e\": # the character 'e' is the disconnect code: entering it drops the server connection\r\n        msg = DISCONNECT_MESSAGE\r\n        send(msg)\r\n        connected = False # disconnected\r\n        break\r\n    else:\r\n        send(msg) # any other input is a regular message; send it","repo_name":"aonurakman/PythonLocalChatRoom","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"27278942398","text":"# Class MotionDetector\n\n'''\n-------------Frame Differencing algorithm---------------\nAccumulate the weighted average between the current frame\nand the previous frames, then compute the pixel-wise differences\nbetween the current frame and the running average;\na faster algorithm for real-time systems\n'''\n\nimport imutils\nimport cv2\n\n\nclass MotionDetector:\n    def __init__(self, _accum_weight=0.5, _delta_thresh=10, _min_area=5000, _resize_ratio=0.5):\n        self.isv2 = imutils.is_cv2()  # determine the OpenCV version\n        self._accum_weight = _accum_weight  # the frame accumulation weight\n        self._delta_thresh = _delta_thresh  # fixed threshold for the delta image\n        self._min_area = _min_area * _resize_ratio  # min area for motion detected\n        self._resize_ratio = _resize_ratio  # image resize ratio\n\n        # initialize the average image for motion detection\n        self._avg = None\n\n    def update(self, image_gray):\n        # Resize input image to smaller size, accumulate process\n        image_small = imutils.resize(image_gray, width=int(image_gray.shape[1]*self._resize_ratio),\n                                     height=int(image_gray.shape[0]*self._resize_ratio))\n        # initialize the list of locations containing motion\n        locs = []\n\n        # if the average image is None, initialize it\n        if self._avg is None:\n            self._avg = image_small.astype(\"float\")\n            return locs\n\n        cv2.accumulateWeighted(image_small, self._avg, self._accum_weight)\n        frame_delta = cv2.absdiff(image_small, cv2.convertScaleAbs(self._avg))\n\n        # threshold the delta image and apply dilations\n        image_thresh = cv2.threshold(\n            frame_delta, self._delta_thresh, 255, cv2.THRESH_BINARY)[1]\n        image_thresh = cv2.dilate(image_thresh, None, iterations=2)\n        # cv2.imshow(\"Thresh\", image_thresh)\n\n        # find contours in the thresholded image\n        cnts = cv2.findContours(\n            image_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        cnts = cnts[0] if self.isv2 else cnts[1]\n\n        # loop over the contours\n        for c in cnts:\n            # only add the contour to the locs list if it > _min_area\n            if cv2.contourArea(c) > self._min_area:\n                locs.append(c)\n\n        # NOTE: these locations are in this small size image coordinate system.\n        return locs\n","repo_name":"zcsd/Surveillance_System","sub_path":"src/motion_detector.py","file_name":"motion_detector.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"}
+{"seq_id":"35306362465","text":"# Day 2: 30 Days of python programming\nimport math\n\nfirst_name = 'Zain'\nlast_name = 'Safdar'\nfull_name = 'Zain Safdar'\ncountry = 'Wonderland'\ncity = 'Moon'\nage = 20.0\nyear = 2003\nis_married = False\nis_true = True\nis_light_on = False\nhappy, sad, excited = True, False, \"Ofcourse\"\n\nprint(type(full_name),type(age),type(year))\n\nif len(first_name) > len(last_name):\n    print(f'{first_name} is longer than {last_name}')\nelif len(first_name) < len(last_name):\n    print(f'{last_name} is longer than {first_name}')\nelse:\n    print(f'Both names are equal')\n\nnum_one, num_two = 5,4\n\ntotal = num_one + num_two\ndiff = num_one - num_two\nproduct = num_one * num_two\ndivision = num_one / num_two\nremainder = num_one % num_two\nexp = num_one ** num_two\nfloor_division = num_one // num_two\n\nradius = 30 # User input\narea_of_circle = math.pi * radius 
** 2\ncircum_of_circle = 2 * math.pi * radius\n\nprint(f'Area: {area_of_circle}')\nprint(f'Circumference: {circum_of_circle}')\n\nfirst_name = input(\"Enter First Name: \")\nlast_name = input(\"Enter Last Name: \")\ncountry = input(\"Enter Country: \")\ncity = input(\"Enter City: \")\nage = int(input(\"Enter Age: \"))\nyear = int(input(\"Enter Year: \"))\n\nhelp('keywords')","repo_name":"Stonkszain/30-Days-Of-Python","sub_path":"day_2/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11204005507","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom time import sleep\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\n\n# class to store macrotrends data after scraping from macrotrends website\n\n\nclass MacroTrendsAPI:\n\n def __init__(self, master):\n # class variables\n self.table_options = [] # stores the table_options for different data types\n self.soup = None # stores the beautiful soup object\n self.col_headers = None # column headers for the table\n self.col_data = None # column data for the different headers\n\n # retrieve macrotrends stock screener page using selenium\n opts = Options()\n opts.add_argument('-headless')\n self.browser = Firefox(\n executable_path=r'D:\\WORK\\Python\\DataAnalysis\\resources\\gecko_driver\\geckodriver.exe')\n self.browser.get('https://www.macrotrends.net/stocks/stock-screener')\n\n sleep(2)\n # parse html content\n self.soup = BeautifulSoup(self.browser.page_source, 'lxml')\n\n # get table options\n data_options = self.soup.find(id='myPills1')\n\n # list containing table options id's\n col_opts = ['columns_overview', 'columns_descriptive', 'columns_dividend', 'columns_performance_st',\n 'columns_performance_lt', 'columns_ratios_income', 'columns_ratios_debt', 'columns_rev_earnings']\n\n # for each column option get the text\n for key, col in enumerate(col_opts):\n\n # extract table options text\n li = data_options.find(id=col)\n a = li.find('a').text\n\n self.table_options.append(a)\n\n # function to get the table once the table option has been selected\n def getTable(self, selection=None):\n\n overview_but = '/html/body/div[1]/div[4]/div[2]/div/ul/li[1]/a'\n descriptive_but = '/html/body/div[1]/div[4]/div[2]/div/ul/li[2]/a'\n dividends_but = '/html/body/div[1]/div[4]/div[2]/div/ul/li[3]/a'\n performance_st_but = '/html/body/div[1]/div[4]/div[2]/div/ul/li[4]/a'\n performance_lt_but = '/html/body/div[1]/div[4]/div[2]/div/ul/li[5]/a'\n income_ratios_but = '/html/body/div[1]/div[4]/div[2]/div/ul/li[6]/a'\n debt_ratios_but = '/html/body/div[1]/div[4]/div[2]/div/ul/li[7]/a'\n revenue_earnings_but = '/html/body/div[1]/div[4]/div[2]/div/ul/li[8]/a'\n\n if selection == 'columns_overview':\n self.browser.find_element_by_xpath(overview_but).click()\n elif selection == 'columns_descriptive':\n self.browser.find_element_by_xpath(descriptive_but).click()\n elif selection == 'columns_dividend':\n self.browser.find_element_by_xpath(dividends_but).click()\n elif selection == 'columns_performance_st':\n self.browser.find_element_by_xpath(performance_st_but).click()\n elif selection == 'columns_performance_lt':\n self.browser.find_element_by_xpath(performance_lt_but).click()\n elif selection == 'columns_ratios_income':\n self.browser.find_element_by_xpath(income_ratios_but).click()\n elif selection == 'columns_ratios_debt':\n 
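# click the debt-ratios tab through the XPath mapped above\n            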
self.browser.find_element_by_xpath(debt_ratios_but).click()\n elif selection == 'columns_rev_earnings':\n self.browser.find_element_by_xpath(revenue_earnings_but).click()\n else:\n print('No data set selection made')\n sleep(5)\n self.parseTable(self.browser)\n\n done = False\n counter = 0\n while (done == False):\n # try:\n # TODO:put data into dataframe\n # TODO:save the column headers in the database as well\n\n # click next button\n next_button = '/html/body/div[1]/div[4]/div[2]/div/div/div/div/div[10]/div/div[4]/div'\n self.browser.find_element_by_xpath(next_button).click()\n # wait to retrieve content\n sleep(2)\n\n # parse table for new content\n self.parseTable(self.browser)\n\n counter = counter + 1\n\n if counter == 10:\n done = True\n self.browser.quit()\n\n # except Error:\n # print('Something here')\n\n # function to parse table\n\n def parseTable(self, browser):\n self.col_headers = []\n self.col_data = []\n table = []\n\n # find table with selenium\n results = browser.find_element_by_id('contextjqxGrid')\n print('\\n', results, '\\n')\n\n # get table\n self.soup = BeautifulSoup(\n browser.page_source, 'html.parser') # 'lxml')\n\n # printing from soup\n print('Soup printed : \\n', self.soup.find_all(\n id=\"contentjqxGrid\"))\n self.soup = self.soup.find_all(id=\"contextjqxGrid\")\n\n # displaying contents from table\n print('Table: \\n', self.soup)\n col_table = table.find(id=\"columntablejqxGrid\")\n\n # get table data\n for key, element in enumerate(list(col_table.children)):\n print(element.text)\n self.col_headers.append(element.text)\n self.col_data.append([])\n\n # get data under the headers\n table_data = table.find(id=\"contenttablejqxGrid\")\n for key, element in enumerate(list(table_data.children)):\n for key1, item in enumerate(list(element.children)):\n print(item.text)\n self.col_data[key1].append(item.text)\n\n print('COL headers: \\n', self.col_headers)\n print(\"COL data: \\n\", self.col_data)\n","repo_name":"TheFaro/DataAnalysis","sub_path":"UI/macrotrends_data.py","file_name":"macrotrends_data.py","file_ext":"py","file_size_in_byte":5576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39046874805","text":"from labjack import ljm\nimport numpy as np\nfrom scipy.signal import resample\nfrom labyak import LabJack\n\nclass WaveformGenerator(LabJack):\n ''' Digital pattern generator based on the LabJack T7 '''\n def __init__(self, device='ANY', connection='ANY', devid='ANY'):\n super().__init__(device=device, connection=connection, devid=devid)\n\n def stream_out(self, channels, data, scanRate, loop = 0):\n ''' Streams data at a given scan rate.\n\n Args:\n channels (list): Output channels to stream on, e.g. ['DAC0', 'DAC1']\n data (array): Data to stream out. 
For streaming on multiple channels, use column 0 for DAC0 and column 1 for DAC1.\n scanRate (float): desired output rate in scans/s\n loop (int): number of values from the end of the buffer to loop after finishing stream\n '''\n\n self.stop()\n n = np.ceil(np.log10(2*(1+len(data)))/np.log10(2))\n buffer_size = 2**n\n\n for i in range(len(channels)):\n self._write_dict({f'STREAM_OUT{i}_TARGET': 1000+2*channels[i],\n f'STREAM_OUT{i}_BUFFER_SIZE': buffer_size,\n f'STREAM_OUT{i}_ENABLE': 1\n })\n\n target = ['STREAM_OUT%i_BUFFER_F32'%i] * len(data)\n self._write_array(target, list(data))\n\n self._write_dict({f'STREAM_OUT{i}_LOOP_SIZE': loop*len(data),\n f'STREAM_OUT{i}_SET_LOOP': 1\n })\n self.aScanList.append(4800+i) # add stream-out register to scan list\n\n scanRate = ljm.eStreamStart(self.handle, 1, len(self.aScanList), self.aScanList, scanRate)\n\n def prepare_stream(self, channels):\n self.stop()\n\n ''' Set stream parameters '''\n self.aScanList = []\n self._write_dict({'STREAM_SETTLING_US': 0,\n 'STREAM_RESOLUTION_INDEX': 0,\n 'STREAM_CLOCK_SOURCE': 0\n })\n\n def prepare_stream_trigger(self, ch):\n if ch is None:\n self._command(\"STREAM_TRIGGER_INDEX\", 0) # disable triggered stream\n else:\n self._write_dict({f\"DIO{ch}_EF_ENABLE\": 0,\n f\"DIO{ch}_EF_INDEX\": 3,\n f\"DIO{ch}_EF_OPTIONS\": 0,\n f\"DIO{ch}_EF_VALUE_A\": 2,\n f\"DIO{ch}_EF_ENABLE\": 1,\n \"STREAM_TRIGGER_INDEX\": 2000+ch\n })\n ljm.writeLibraryConfigS('LJM_STREAM_RECEIVE_TIMEOUT_MS',0) #disable timeout\n\n def optimize_stream(self, array, period, max_samples = 8191):\n ''' Compute optimum scan rate and number of samples '''\n if self.deviceType == ljm.constants.dtT7:\n max_speed = 100000\n elif self.deviceType == ljm.constants.dtT4:\n max_speed = 40000\n\n cutoff = max_samples / max_speed\n if period >= cutoff:\n samples = max_samples\n scanRate = int(samples/period)\n else:\n scanRate = max_speed\n samples = int(period*scanRate)\n\n stream = resample(array, samples)\n return stream, scanRate\n\n def start(self, t, V, channels = ['DAC0']):\n print('Resampling to optimal scan rate')\n data, scanRate = self.optimize_stream(V, np.max(t))\n print('Preparing stream')\n self.prepare_stream(channels)\n self.prepare_stream_trigger(None)\n\n print('Starting stream')\n self.stream_out([int(x[-1]) for x in channels], data, scanRate, loop=1)\n\nif __name__ == '__main__':\n p = WaveformGenerator(devid='470018954')\n f = 5e3\n t = np.linspace(0, 1/f, 300)\n V = 2.5*(1+np.sin(2*np.pi*f*t))\n p.start(t, V)\n","repo_name":"robertfasano/labyak","sub_path":"labyak/deprecated/waveform_generator.py","file_name":"waveform_generator.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"70114268425","text":"from typing import Iterator\n\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom src.infrastructure.adapters.database.tables.server import ServerMember\nfrom src.infrastructure.adapters.database.repositories.errors import NotFoundError\n\n\nclass ServerMemberRepository:\n def __init__(self, session_factory: AsyncSession) -> None:\n self.session_factory = session_factory\n\n async def get_all(self) -> Iterator[ServerMember]:\n async with self.session_factory() as session:\n return await session.query(ServerMember).all()\n\n async def get_by_id(self, server_member_id: int) -> ServerMember:\n async with self.session_factory() as session:\n user = (\n await session.query(ServerMember)\n .filter(ServerMember.id == server_member_id)\n 
.first()\n            )\n            if not user:\n                raise ServerMemberNotFoundError(server_member_id)\n            return user\n\n    async def add(self, server_role: ServerMember) -> ServerMember:\n        async with self.session_factory() as session:\n            session.add(server_role)\n            await session.commit()\n            await session.refresh(server_role)\n            return server_role\n\n    async def delete_by_id(self, server_member_id: int) -> None:\n        async with self.session_factory() as session:\n            entity: ServerMember = (\n                await session.query(ServerMember)\n                .filter(ServerMember.id == server_member_id)\n                .first()\n            )\n            if not entity:\n                raise ServerMemberNotFoundError(server_member_id)\n            session.delete(entity)\n            await session.commit()\n\n\nclass ServerMemberNotFoundError(NotFoundError):\n\n    entity_name: str = \"ServerMember\"\n","repo_name":"auredentan/discord-clone","sub_path":"backend/src/infrastructure/adapters/database/repositories/server_member.py","file_name":"server_member.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"30534867370","text":"#! python3\n# fill_gaps.py\n# Author: Kene Udeh\n# Source: Automate the Boring stuff with python Ch. 9 Project\n\nimport os\nimport re\nimport shutil\n\ndef getFilesWithPrefix(folderPath, prefix):\n    \"\"\"get all files with a certain prefix\n    Args:\n        folderPath (str): path to folder to search\n    Returns:\n        fileList (list): sorted list of file names matching the prefix\n    \"\"\"\n    fileRegex = re.compile(prefix+'(\d{1,})(\.\w+)')\n    fileList = sorted( [file for file in os.listdir(folderPath) if fileRegex.match(file)] )\n    return fileList\n\ndef fillGaps(folderPath, prefix):\n    \"\"\"fill gaps in numbering of files in folder\n    Args:\n        folderPath (str): path to folder to search\n        prefix (str): prefix of files to fill gap\n    Returns:\n        None\n    \"\"\"\n    fileList = getFilesWithPrefix(folderPath, prefix) # files sorted ascending order\n    fileRegex = re.compile(prefix+'(\d{1,})(\.\w+)')\n\n    start = int(fileRegex.search(fileList[0]).group(1)) # start with the minimum number in list\n    count = start # count to be incremented during checks for gaps\n    max_length = len(fileRegex.search(fileList[-1]).group(1)) # max length of largest number, for padding zeros\n\n    for file in fileList:\n\n        mo = fileRegex.search(file)\n        fileNum = int(mo.group(1))\n\n        if fileNum != count:\n            newFileName = prefix + '0'*(max_length-len(str(fileNum))) + str(count) + mo.group(2)\n            shutil.move(os.path.abspath(file), os.path.abspath(newFileName))\n\n        count += 1\n\ndef insertGaps(folderPath, prefix, index):\n    \"\"\"insert gaps in numbering of files in folder\n    Args:\n        folderPath (str): path to folder to search\n        prefix (str): prefix of files to insert gap\n        index (int): where to insert the gap\n    Returns:\n        None\n    \"\"\"\n\n    fileList = getFilesWithPrefix(folderPath, prefix) # files sorted ascending order\n    fileRegex = re.compile(prefix+'(\d{1,})(\.\w+)')\n\n    max_length = len(fileRegex.search(fileList[-1]).group(1)) # max length of largest number, for padding zeros\n\n    firstIndex = int(fileRegex.search(fileList[0]).group(1)) # smallest number \n    lastIndex = int(fileRegex.search(fileList[-1]).group(1)) # largest number\n\n    if index >= firstIndex and index <= lastIndex: # if gap index falls in range\n\n        i = 0\n        currIndex = firstIndex\n        while currIndex < index:\n            # loop till the file number is >= gap index \n            i += 1\n            currIndex = int(fileRegex.search(fileList[i]).group(1))\n\n        if currIndex == index: # if gap index is taken, make a gap else already free\n\n            for file in fileList[i:][::-1]:\n                # loop through reversed file list, 
to prevent overwriting results and increment file number\n\n mo = fileRegex.search(file)\n newFileNum = int(mo.group(1)) + 1\n newFileName = prefix + '0'*(max_length-len(str(newFileNum))) + str(newFileNum) + mo.group(2)\n shutil.move(os.path.abspath(file), os.path.abspath(newFileName))\n\n\nif __name__ == \"__main__\":\n \n with open('spam001.txt', 'w') as s1, open('spam003.txt', 'w') as s3:\n s1.write('spam001')\n s3.write('spam003')\n\n fillGaps('.', 'spam')\n #insertGaps('.', 'spam', 2)\n ","repo_name":"kudeh/automate-the-boring-stuff-projects","sub_path":"fill-gaps/fill_gaps.py","file_name":"fill_gaps.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":310,"dataset":"github-code","pt":"81"} +{"seq_id":"72647145546","text":"import matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nimport jieba\n\ntext_from_file_with_apath = open('C:/Users/Public/Documents/python/result.txt', 'rb').read()\nfont = 'C:/Users/Public/Documents/python/font/simfang.ttf'\n\nwordlist_after_jieba = jieba.cut(text_from_file_with_apath, cut_all = True)\nwl_space_split = \" \".join(wordlist_after_jieba)\n\nmy_wordcloud = WordCloud(collocations=False, font_path=font, width=2000, height=2000, margin=2).generate(wl_space_split)\n\nplt.imshow(my_wordcloud)\nplt.axis(\"off\")\nplt.show()","repo_name":"Aissue/myPython","sub_path":"com/levelappro/ciyun/mywordcloud3.py","file_name":"mywordcloud3.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31899671145","text":"from mytodolists.front import Printers\nfrom mytodolists.user_inputs import UserMainInput\nfrom mytodolists.custom_exceptions import (\n TableDoesntExistError, DatabaseNotConnectedError\n)\nfrom user_interface import UserInterfaceManager\nfrom constants import MainOptions\n\n\nclass UserMenu:\n\n @staticmethod\n def menu_handler() -> None:\n \"\"\"Main menu.\"\"\"\n\n Printers.print_welcome()\n\n functions = {\n MainOptions.CREATE_NEW_LIST.value:\n UserInterfaceManager().create_new_list,\n MainOptions.ADD_NEW_TASK.value:\n UserInterfaceManager().add_new_task,\n MainOptions.LIST_OPTIONS.value:\n UserInterfaceManager().list_submenu,\n MainOptions.OBTAIN_OPTIONS.value:\n UserInterfaceManager().obtain_submenu,\n MainOptions.MODIFY_TASK.value: UserInterfaceManager().modify_task,\n MainOptions.DELETE_TASK.value: UserInterfaceManager().delete_task,\n MainOptions.SEARCH_OPTIONS.value:\n UserInterfaceManager().search_submenu,\n MainOptions.SORT_GROUP_TASKS.value:\n UserInterfaceManager().sortgroup_submenu\n }\n\n ask = True\n while ask:\n Printers.print_menu()\n choice = UserMainInput().choice_main_menu()\n if choice == MainOptions.EXIT.value:\n ask = False\n break\n try:\n functions[choice]()\n except (KeyError, ValueError):\n Printers.print_error_choice()\n except (DatabaseNotConnectedError, TableDoesntExistError):\n Printers.print_critical()\n ask = False\n\n\nif __name__ == '__main__':\n UserMenu.menu_handler()\n","repo_name":"PaolaCartala/PythonExercisesOOP","sub_path":"week4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15962015730","text":"import time\nimport RPi.GPIO as GPIO\nimport os\nimport random\nimport logging\n\nlogging.basicConfig(filename='/home/pi/atc-module/atc.log')\n\n\n#Pi3 pins For traffic lights\nSDI = 11\nCLK = 13\nLATCH = 15\nOE = 12\n\nGreen_Time 
= []\nDefault_Green_Time = []\nYellow_Time = 5\nSignal = []\nGreen_Signal_Byte = []\nYellow_Signal_Byte = []\n\nNo_of_lanes = 4\n\nON_Byte = \"\"\n\ndef config_pi():\n\tGPIO.setwarnings(False)\n\tGPIO.setmode(GPIO.BOARD) \n\tGPIO.setup(SDI, GPIO.OUT)\n\tGPIO.setup(CLK, GPIO.OUT)\n\tGPIO.setup(LATCH, GPIO.OUT)\n\tGPIO.setup(OE, GPIO.OUT)\n\tGPIO.output(SDI, GPIO.LOW)\n\tGPIO.output(CLK, GPIO.LOW)\n\tGPIO.output(LATCH, GPIO.LOW)\n\tGPIO.output(OE, GPIO.HIGH)\n\tGPIO.output(OE, 1)\n\n\ndef shiftout(byte):\n    GPIO.output(LATCH, 0)\n    for x in range(4*No_of_lanes):\n        GPIO.output(SDI, (byte >> x) & 1)\n        GPIO.output(CLK, 1)\n        time.sleep(0.001)\n        GPIO.output(CLK, 0)\n    GPIO.output(LATCH, 1)\n\ndef intialize_lights():\n    logging.warning(\"ON\")\n    GPIO.output(OE, 1)\n    temp_on = list(ON_Byte)\n    On_Byte = \"\".join(temp_on)\n    hex_int = int(On_Byte, 16)\n    logging.warning(\"Byte: %s\", hex(hex_int))\n    shiftout(~(hex_int))#0b11111111\n    GPIO.output(OE, 0)\n    time.sleep(3)\n    logging.warning(\"OFF\")\n    GPIO.output(OE, 1)\n    shiftout((hex_int))#0b11111111\n    GPIO.output(OE, 0)\n    time.sleep(3)\n    logging.warning(\"Done\")\n\ndef byte_def():\n\n    global ON_Byte\n    global Default_Green_Time\n    Default_Green_Time = []\n    logging.warning(\"Generating!\")\n    Byte = \"0x\"\n    ON_Byte = \"0x\"\n    for i in range(No_of_lanes):\n        Default_Green_Time.append(5)\n        Signal.append(\"Red\")\n        Byte = Byte + \"1\"\n        ON_Byte = ON_Byte + \"F\"\n\n    for i in range(No_of_lanes):\n        temp_g = list(Byte)\n        temp_y = list(Byte)\n        temp_g[i+2] = \"C\"\n        temp_y[i+2] = \"2\"\n        Green_Byte = \"\".join(temp_g)\n        Yellow_Byte= \"\".join(temp_y)\n        hex_int = int(Green_Byte, 16)\n        Green_Signal_Byte.append(hex_int)\n        logging.warning(\"Byte: %s\", hex(hex_int))\n        hex_int = int(Yellow_Byte, 16)\n        Yellow_Signal_Byte.append(hex_int)\n        logging.warning(\"Byte: %s\", hex(hex_int))\n\n    logging.warning(\"Green Time: \"+str(Green_Time))\n    logging.warning(\"Green_Signal_Byte: \"+str(Green_Signal_Byte))\n    logging.warning(\"Yellow_Signal_Byte: \"+str(Yellow_Signal_Byte))\n    logging.warning(\"Generated!\")\n\ndef initialize_timing(Green_Time):\n\n    global Red_Time\n    Red_Time = []\n    for i in range (0,len(Green_Time)):\n        if Green_Time[i] != 0:\n            Red_Time.append( ((len(Green_Time)-1)*Yellow_Time) + sum(Green_Time) - Green_Time[i] )\n        else:\n            Red_Time.append(0)\n    logging.warning(\"Red Time: \"+str(Red_Time))\n\ndef data_fetch(data_status=True):\n    while(data_status):\n        data = \"\"\n        try:\n            file = open(\"data.txt\",\"r\")\n            for line in file:\n                data = line\n            file.close()\n            data_status = False\n            return data\n\n        except Exception as e:\n            logging.warning(e)\n            data_status = True\n\ndef delay_one_second(second,byte):\n    timeout = time.time() + second\n    while True:\n        shiftout(~byte)\n        if time.time() > timeout:\n            break\n\ndef lights_control():\n    logging.warning(\"\\nStarting controller!\")\n    for lane in range(len(Green_Time)):\n        if Green_Time[lane] == 0:\n            logging.warning(\"\\nDoes not exist!\")\n        else:\n            Signal[lane] = \"Green\"\n            logging.warning(\"Signal: \"+str(Signal))\n            logging.warning(\"Red Time: \"+str(Red_Time))\n            GPIO.output(OE, 1)\n            shiftout(~Green_Signal_Byte[lane])\n            GPIO.output(OE, 0)\n            for i in range(Green_Time[lane]):\n                for j in range(len(Red_Time)):\n                    shiftout(~Green_Signal_Byte[lane])\n                    if (j != lane) and (Red_Time[j] != 0):\n                        Red_Time[j] -= 1\n                #time.sleep(1)\n                delay_one_second(1,Green_Signal_Byte[lane])\n                logging.warning(\"Red Time: \"+str(Red_Time))\n            Signal[lane] = \"Yellow\"\n            logging.warning(\"Signal: \"+str(Signal))\n            GPIO.output(OE, 1)\n            shiftout(~Yellow_Signal_Byte[lane])\n        
GPIO.output(OE, 0)\n for i in range(Yellow_Time):\n for j in range(len(Red_Time)):\n shiftout(~Yellow_Signal_Byte[lane])\n if (j != lane) and (Red_Time[j] != 0):\n Red_Time[j] -= 1\n #time.sleep(1)\n delay_one_second(1,Yellow_Signal_Byte[lane])\n logging.warning(\"Red Time: \"+str(Red_Time))\n Signal[lane] = \"Red\"\n\nif __name__ == '__main__':\n config_pi()\n byte_def()\n intialize_lights()\n while(1):\n text = data_fetch()\n if(text == \"\"):\n logging.warning(\"Data not found!\")\n continue\n elif(text == \"Error\"):\n logging.warning(\"Error in file!\")\n Green_Time = []\n Green_Time = Default_Green_Time\n logging.warning(\"Green Time: \"+str(Green_Time))\n else:\n data = \"\"\n Green_Time = []\n temp = list(text)\n for i in range(1,len(temp)):\n if ((temp[i] == ',') or (temp[i] == ']')):\n Green_Time.append(int(data))\n data = \"\"\n continue\n data = data + temp[i]\n logging.warning(\"Green Time: \"+str(Green_Time))\n initialize_timing(Green_Time)\n lights_control()\n\n\n","repo_name":"AnshumanMaharana/Collaborative-Traffic-Control","sub_path":"ATC_latest/ATC_TRAFFIC_CONTROLLER.py","file_name":"ATC_TRAFFIC_CONTROLLER.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35280051580","text":"import random\n\nsuits = [\n \"Hearts\",\n \"Spades\",\n \"Diamonds\",\n \"Clubs\",\n\n]\n\nranks = [\n \"Ace\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \"10\",\n \"Jack\",\n \"Queen\",\n \"King\",\n]\n\nwhile True:\n raw_input(\"Press enter for a card\")\n print(\"{} of {}\".format(random.choice(ranks), random.choice(suits)))\n","repo_name":"5225225/27_programming_challenges","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38199901231","text":"# names for volume levels.\n# For finer resolution, you can prepend _ (less) or append _ (more)\n# In python, anything starting with _ isn't imported by\n# from foo import * (which I think is a bad idea)\n# That's why these are in a separate module.\n#\npppp = .05\npppp_ = .10\n_ppp = .16\nppp = .23\nppp_ = .30\n_pp = .38\npp = .45\npp_ = .52\n_p = .60\np = .67\np_ = .74\n_mp = .82\nmp = .89\nmp_ = .96\nmm = 1\n_mf = 1.04\nmf = 1.11\nmf_ = 1.18\n_f = 1.26\nf = 1.33\nf_ = 1.40\n_ff = 1.48\nff = 1.55\nff_ = 1.62\n_fff = 1.68\nfff = 1.77\nfff_ = 1.84\n_ffff = 1.92\nffff = 1.99\n\n__all__ = [\n 'pppp', 'pppp_',\n '_ppp', 'ppp', 'ppp_',\n '_pp', 'pp', 'pp_',\n '_p', 'p', 'p_',\n '_mp', 'mp', 'mp_',\n 'mm',\n '_mf', 'mf', 'mf_',\n '_f', 'f', 'f_',\n '_ff', 'ff', 'ff_',\n '_fff', 'fff', 'fff_',\n '_ffff', 'ffff'\n]\n","repo_name":"davidpanderson/Numula","sub_path":"numula/vol_name.py","file_name":"vol_name.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"73183868744","text":"'''\nGlobal configurations of the project.\ne.g. 
URL, path, and header.\n'''\n\n# Base URL for diferent websites\nSITES = ['xchuxing', 'diandong', 'cheyun', 'autohome', 'xincheping', '12365auto', 'd1ev']\n\nBASE_URL = {\n 'xchuxing': 'https://xchuxing.com', # 新出行\n 'diandong': 'https://www.diandong.com', # 电动邦\n 'cheyun': 'http://www.cheyun.com', # 车云\n 'autohome': 'https://www.autohome.com.cn/beijing', # 汽车之家\n 'xincheping': 'https://www.xincheping.com', # 新车评\n '12365auto': 'http://www.12365auto.com', # 车质网\n 'd1ev': 'https://d1ev.com', # 第一电动\n}\n\n# Data path\nDATA_PATH = './data'\nTASKS = ['task1', 'task2', 'task3']\n\n# An example header\nHEADER = {\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/json;charset=UTF-8',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36'\n}","repo_name":"DelinQu/LDA-crawler","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41683887010","text":"'''\nMolhore o jogo do DESAFIO 028 onde o computador vai \"pensar\" em um número entre 0 e 10.\nSó que agora o jogador vai tentar adivinhar até acertar, mostrando no final quantos palpites\nforam necessários para vencer\n'''\n\n'''\nfrom random import randint\nfrom time import sleep\ncomputador = randint(1, 10)\nprint('Sou seu computador...')\nprint('Acabei de pensar em um número entre 1 a 10.')\nprint('Será que você consegue adivinhar qual foi? ')\njogador = int(input('Qual é seu palpite: '))\nprint('ANALISANDO...')\nsleep(1)\npalpites = 1\nwhile jogador != computador:\n if jogador > computador:\n jogador = int(input('O número que pensei não foi MENOR. Tente novamente: '))\n elif jogador < computador:\n jogador = int(input('O número que pensei não foi MAIOR. Tente novamente: '))\n palpites += 1\n print('ANALISANDO...')\n sleep(2)\nprint(f'Acertou!! O número que eu escolhei foi {computador}.')\nprint(f'Foram necessários {palpites} palpites para você acertar.')\n'''\n\n\n# Outra forma de fazer (forma do Guanabara)\nfrom random import randint\nfrom time import sleep\n\ncomputador = randint(1, 10)\nprint('''Sou seu computador...\nAcabei de pensar em um número entre 1 e 10.\nSerá que você consegue adivinhar qual é? ''')\nacertou = False # ou outra atruibuição qualquer\npalpites = 0\nwhile not acertou: # Leia como enquanto False, repita (while not x: == while False) e (while x: == while True)\n jogador = int(input('Digite um número: '))\n palpites += 1\n if jogador == computador:\n acertou = True # Quando virá verdeiro, é encerrada a repetição\n elif jogador > computador:\n print('O número que eu pensei foi MENOR.')\n elif jogador < computador:\n print('O número que eu pensei foi MAIOR.')\nprint(f'Parabéns!! Você acertou!!! 
Eu pensei no número {computador}')\nprint(f'Foram necessários {palpites} tentativas para você acertar.')\n\n","repo_name":"brenolemes/exercicios-python","sub_path":"exercicios/CursoemVídeo/ex058.py","file_name":"ex058.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10371681764","text":"class Solution:\n def rotate(self, nums: List[int], k: int) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n import copy\n tmp = copy.deepcopy(nums)\n length = len(nums)\n k %= length\n for i in range(length):\n nums[i] = tmp[(i + length - k) % length]\n","repo_name":"strawsyz/straw","sub_path":"ProgrammingQuestions/leetcode/189.py","file_name":"189.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"41378480729","text":"import dash\n#import dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nfrom django_plotly_dash import DjangoDash\nimport pandas as pd\nfrom django.conf import settings\nfrom oss.models import Site, Instrument, Telescope, FacilityStatus\n\ndef format_link_entry(link_text, url):\n if 'None' in url:\n return 'All instruments'\n else:\n return f\"[\"+link_text+\"](\"+url+\")\"\n\nclass TelescopeStatus():\n def __init__(self):\n self.site = None\n self.site_id = None\n self.telescope = None\n self.telescope_id = None\n self.instrument = None\n self.instrument_id = None\n self.status = None\n self.comment = None\n\n def get_status(self, telescope=None, instrument=None):\n status = None\n if telescope:\n try:\n status = FacilityStatus.objects.filter(telescope=telescope).latest('last_updated')\n except FacilityStatus.DoesNotExist:\n pass\n elif instrument:\n try:\n status = FacilityStatus.objects.filter(instrument=instrument).latest('last_updated')\n except FacilityStatus.DoesNotExist:\n pass\n\n if status:\n self.status = status.status\n self.comment = status.comment\n else:\n self.status = 'Unknown'\n self.comment = ''\n\ndef get_facility_status():\n\n tel_states = []\n\n status_data = FacilityStatus.objects.all()\n\n telescopes = Telescope.objects.all()\n for tel in telescopes:\n status = TelescopeStatus()\n status.site = tel.site.name\n status.site_id = tel.site.pk\n status.telescope = tel.name\n status.telescope_id = tel.pk\n status.get_status(telescope=tel)\n tel_states.append(status)\n\n cameras = Instrument.objects.filter(telescope=tel)\n for camera in cameras:\n status = TelescopeStatus()\n status.site = tel.site.name\n status.site_id = tel.site.pk\n status.telescope = tel.name\n status.telescope_id = tel.pk\n status.get_status(instrument=camera)\n tel_states.append(status)\n\n return tel_states\n\napp = DjangoDash('FacilitiesTable')\n\ntable_columns = [dict(name='Site', id='Site', type='text', presentation='markdown'),\n dict(name='Facility', id='Facility', type='text', presentation='markdown'),\n dict(name='Instrument', id='Instrument', type='text', presentation='markdown'),\n dict(name='Status', id='Status'),\n dict(name='Comment', id='Comment')]\n\nstatus_list = get_facility_status()\n\ntable_data = []\nif status_list:\n for tel_status in status_list:\n table_data.append( dict(Site=format_link_entry(tel_status.site, '/site/'+str(tel_status.site_id)+'/'),\n Facility=format_link_entry(tel_status.telescope, '/telescope/'+str(tel_status.telescope_id)+'/'),\n Instrument=format_link_entry(tel_status.instrument, 
'/instrument/'+str(tel_status.instrument_id)+'/'),\n Status=tel_status.status,\n Comment=tel_status.comment) )\n\napp.layout = html.Div( dash_table.DataTable(\n id='FacilitiesTable',\n columns=table_columns,\n data=table_data,\n sort_action=\"native\",\n filter_action=\"native\",\n style_table={'height': '600px', 'overflowY': 'auto'},\n style_cell={'fontSize':18, 'font-family':'sans-serif'},\n style_cell_conditional=[\n { 'if': {'column_id': 'Status'},\n 'backgroundColor': 'white',\n 'color': 'black' },\n { 'if': {'column_id': 'Status',\n 'filter_query': '{Status} = \"Offline\"'},\n 'backgroundColor': 'rgb(83, 7, 105)',\n 'color': 'white' },\n { 'if': {'column_id': 'Status',\n 'filter_query': '{Status} = \"Open\"'},\n 'backgroundColor': 'rgb(50, 168, 82)',\n 'color': 'white' },\n { 'if': {'column_id': 'Status',\n 'filter_query': '{Status} = \"Closed-weather\"'},\n 'backgroundColor': 'rgb(26, 80, 196)',\n 'color': 'white' },\n { 'if': {'column_id': 'Status',\n 'filter_query': '{Status} = \"Closed-unsafe\"'},\n 'backgroundColor': 'rgb(224, 132, 40)',\n 'color': 'white' },\n { 'if': {'column_id': 'Status',\n 'filter_query': '{Status} = \"Closed-daytime\"'},\n 'backgroundColor': 'rgb(218, 224, 40)',\n 'color': 'white' },\n { 'if': {'column_id': 'Status',\n 'filter_query': '{Status} = \"Unknown\"'},\n 'backgroundColor': 'rgb(168, 160, 160)',\n 'color': 'white' },\n { 'if': {\n 'column_id': 'Status' # 'text' | 'any' | 'datetime' | 'numeric'\n },\n 'textAlign': 'left'},\n { 'if': {\n 'column_id': 'Comment' # 'text' | 'any' | 'datetime' | 'numeric'\n },\n 'textAlign': 'left'},\n ],\n ) )\n","repo_name":"rachel3834/observatory_status_system","sub_path":"oss/facilities_table_app.py","file_name":"facilities_table_app.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34047375216","text":"x = input('Type what do you want to reverse:')\nb = ''.join(reversed(x))\n\nprint(b)\n\nprint(x[::-1])\n\ndef oddEvenCount(x):\n odd_count = 0\n even_count = 0\n for i in x:\n if i % 2 == 0:\n even_count += 1\n else:\n odd_count += 1\n\n return \"odd count: \" + str(odd_count) + \"\\teven count: \" + str(even_count)\n\nx = [1,2,3,4,5,6,7,8,9,10,11]\nprint(oddEvenCount(x))\n\ndata = [1452, 11.23, 1+2j, True, 'w3resource', (0, -1), [5, 12], {\"class\":'V', \"section\":'A'}]\n\nfor icecream in data:\n print('Type of ', icecream, ' is ', type(icecream))\n\nfor i in range(7):\n if i != 3 and i != 6:\n print(i)\n\nprint('\\n')\n\nfor moomoo in range(1, 50):\n if moomoo % 3 == 0 and moomoo % 5 == 0:\n print('SUPER BADASS!!!')\n elif moomoo % 5 == 0:\n print('BADASS')\n elif moomoo % 3 == 0:\n print('SUPAA')\n else:\n print(moomoo)\n","repo_name":"gsaukov/python-machine","sub_path":"core/days100/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37712397998","text":"# 171.Excel 表列序号\nfrom string import ascii_uppercase\n\n\nclass Solution:\n # def titleToNumber(self, s: str) -> int:\n # # 不能写成 a in ascii_uppercase b in range(1, 27),这样就是全排列了\n # dic = {a: b for a, b in zip(ascii_uppercase, range(1, 27))}\n # res = 0\n # for c in s:\n # res *= 26\n # res += dic[c]\n # return res\n\n def titleToNumber(self, s: str) -> int:\n base = ord('A')\n res = 0\n for c in s:\n res *= 26\n res += ord(c) - base + 1\n return res\n\n\ns = 
Solution()\nprint(s.titleToNumber('A'))\n\nprint(s.titleToNumber('AB'))\n\nprint(s.titleToNumber('ZY'))\n","repo_name":"BruceHi/leetcode","sub_path":"month11/titleToNumber.py","file_name":"titleToNumber.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"16843566301","text":"import numpy as np\nfrom typing import List\nimport cv2\nimport os\n\ndef read_npys(dir: str) -> List:\n    npys = os.listdir(dir)\n    return npys\n\ndef convert_to_binary_mask(p_dir, out_dir, npy_list):\n    for name in npy_list:\n        npy = np.load(os.path.join(p_dir, name))\n        npy[np.where(npy>0)] = 255\n        cv2.imwrite(f\"{out_dir}/{name.split('.')[0]}.png\", npy)\n\n\ndef convert_to_instance_mask(p_dir, out_dir, npy_list):\n    for name in npy_list:\n        npy = np.load(os.path.join(p_dir, name))\n        instance_img = np.zeros((npy.shape[0], npy.shape[1], 3), dtype=np.uint8)\n        for val in np.unique(npy)[1:]:\n            instance_img[np.where(npy==val)] = list(np.random.choice(range(256), size=3))\n        cv2.imwrite(f\"{out_dir}/{name.split('.')[0]}.png\", instance_img)\n\n\ndef main():\n    mask_dir = \"/home/krystal/workspace/dataset/monuseg-3/test\"\n    size_mask = \"ins_npy/256\"\n    npys_list = read_npys(os.path.join(mask_dir, size_mask))\n    binary_save = os.path.join(mask_dir, 'bin_masks', '256')\n    if not os.path.exists(binary_save):\n        os.makedirs(binary_save, exist_ok=True)\n    convert_to_binary_mask(os.path.join(mask_dir, size_mask), binary_save, npys_list)\n    \n    instance_save = os.path.join(mask_dir, 'ins_masks', '256')\n    if not os.path.exists(instance_save):\n        os.makedirs(instance_save, exist_ok=True)\n    convert_to_instance_mask(os.path.join(mask_dir, size_mask), instance_save, npys_list)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"krystal-kk/double-unet-aspp","sub_path":"dataset_utils/make_generation.py","file_name":"make_generation.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"30669749262","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QKeySequence\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWebEngineWidgets import *\nfrom PyQt5.QtPrintSupport import *\nfrom browser_tabbed import *\nimport os\nimport sys\n\n\nclass MainWindow(QMainWindow):\n    def __init__(self, *args, **kwargs):\n        super(MainWindow, self).__init__(*args, **kwargs)\n\n        self.tabs = QTabWidget()\n        self.tabs.setDocumentMode(True)\n        self.tabs.tabBarDoubleClicked.connect(self.tab_open_doubleclick)\n        self.tabs.currentChanged.connect(self.current_tab_changed)\n        self.tabs.setTabsClosable(True)\n        self.tabs.tabCloseRequested.connect(self.close_current_tab)\n\n        # Control to Tabs\n        self.shortcut_open = QShortcut(QKeySequence('Ctrl+N'), self)\n        self.shortcut_open.activated.connect(self.add_new_tab)\n        # self.shortcut_close = QShortcut(QKeySequence('Ctrl+W'), self)\n        # self.shortcut_close.activated.connect(self.close_tabs)\n        self.shortcut_close = QShortcut(QKeySequence('Ctrl+S'), self)\n        self.shortcut_close.activated.connect(self.save_file)\n        self.shortcut_close = QShortcut(QKeySequence('Ctrl+O'), self)\n        self.shortcut_close.activated.connect(self.open_file)\n\n        # Navigation shortcut\n        self.shortcut_close = QShortcut(\n            QKeySequence('Ctrl+Z'), self)\n        self.shortcut_close.activated.connect(\n            lambda: self.tabs.currentWidget().back())\n        self.shortcut_close = QShortcut(\n            QKeySequence('Ctrl+Shift+Z'), self)\n        self.shortcut_close.activated.connect(\n            
lambda: self.tabs.currentWidget().forward())\n\n        self.setCentralWidget(self.tabs)\n        self.status = QStatusBar()\n        self.setStatusBar(self.status)\n        navtb = QToolBar(\"Navegacion\")\n        navtb.setIconSize(QSize(23, 23))\n        self.addToolBar(navtb)\n\n        # Configuration action Widget\n        back_btn = QAction(\n            QIcon(os.path.join('images', 'arrow-180.png')), \"Volver\", self)\n        back_btn.setStatusTip(\"Volver a la Pagina anterior\")\n        back_btn.triggered.connect(lambda: self.tabs.currentWidget().back())\n        navtb.addAction(back_btn)\n\n        next_btn = QAction(\n            QIcon(os.path.join('images', 'arrow-000.png')), \"Siguiente\", self)\n        next_btn.setStatusTip(\"Siguiente Pagina\")\n        next_btn.triggered.connect(lambda: self.tabs.currentWidget().forward())\n        navtb.addAction(next_btn)\n\n        reload_btn = QAction(\n            QIcon(os.path.join('images', 'arrow-circle-315.png')), \"Recargar\", self)\n        reload_btn.setStatusTip(\"Recargar Pagina\")\n        reload_btn.triggered.connect(\n            lambda: self.tabs.currentWidget().reload())\n        navtb.addAction(reload_btn)\n\n        home_btn = QAction(\n            QIcon(os.path.join('images', 'home.png')), \"Home\", self)\n        home_btn.setStatusTip(\"IR A Menu\")\n        home_btn.triggered.connect(self.navigate_home)\n        navtb.addAction(home_btn)\n\n        navtb.addSeparator()\n\n        self.httpsicon = QLabel()\n        self.httpsicon.setPixmap(\n            QPixmap(os.path.join('images', 'lock-nossl.png')))\n        navtb.addWidget(self.httpsicon)\n\n        self.urlbar = QLineEdit()\n        self.urlbar.returnPressed.connect(self.navigate_to_url)\n        navtb.addWidget(self.urlbar)\n\n        stop_btn = QAction(\n            QIcon(os.path.join('images', 'cross-circle.png')), \"Parar\", self)\n        stop_btn.setStatusTip(\"Stop loading current page\")\n        stop_btn.triggered.connect(lambda: self.tabs.currentWidget().stop())\n        navtb.addAction(stop_btn)\n\n        file_menu = self.menuBar().addMenu(\"&Archivo\")\n\n        new_tab_action = QAction(\n            QIcon(os.path.join('images', 'ui-tab--plus.png')), \"Nueva ventana\", self)\n        new_tab_action.setStatusTip(\"Open a new tab\")\n        new_tab_action.triggered.connect(lambda _: self.add_new_tab())\n        file_menu.addAction(new_tab_action)\n\n        open_file_action = QAction(\n            QIcon(os.path.join('images', 'disk--arrow.png')), \"Abrir Archivos...\", self)\n        open_file_action.setStatusTip(\"Open from file\")\n        open_file_action.triggered.connect(self.open_file)\n        file_menu.addAction(open_file_action)\n\n        save_file_action = QAction(\n            QIcon(os.path.join('images', 'disk--pencil.png')), \"Save Page As...\", self)\n        save_file_action.setStatusTip(\"Save current page to file\")\n        save_file_action.triggered.connect(self.save_file)\n        file_menu.addAction(save_file_action)\n\n        print_action = QAction(\n            QIcon(os.path.join('images', 'printer.png')), \"Print...\", self)\n        print_action.setStatusTip(\"Print current page\")\n        print_action.triggered.connect(self.print_page)\n        file_menu.addAction(print_action)\n\n        help_menu = self.menuBar().addMenu(\"&Ayuda\")\n\n        about_action = QAction(QIcon(os.path.join(\n            'images', 'question.png')), \"About Mozilla ITLA\", self)\n        about_action.setStatusTip(\n            \"Find out more about Mozilla ITLA\")\n        about_action.triggered.connect(self.about)\n        help_menu.addAction(about_action)\n\n        navigate_mozarella_action = QAction(QIcon(os.path.join('images', 'lifebuoy.png')),\n                                            \"Mozilla ITLA Homepage\", self)\n        navigate_mozarella_action.setStatusTip(\n            \"Go to Mozilla ITLA Homepage\")\n        navigate_mozarella_action.triggered.connect(self.navigate_mozarella)\n        help_menu.addAction(navigate_mozarella_action)\n\n        self.add_new_tab(QUrl('http://www.google.com'), 'Homepage')\n\n        self.show()\n\n        
self.setWindowTitle(\"Mozilla ITLA\")\n self.setWindowIcon(QIcon(os.path.join('images', 'ma-icon-64.png')))\n\n # def shortcut():\n # self.shortcut_open = QShortcut(QKeySequence('Ctrl+O'), self)\n # self.shortcut_open.activated.connect(self.on_open)\n\n def add_new_tab(self, qurl=None, label=\"Blank\"):\n\n if qurl is None:\n qurl = QUrl('http://www.google.com')\n\n browser = QWebEngineView()\n browser.setUrl(qurl)\n i = self.tabs.addTab(browser, label)\n\n self.tabs.setCurrentIndex(i)\n\n # if qurl.container(\".com\"):\n # browser.setUrl(qurl)\n # i = self.tabs.addTab(browser, label);\n # if qurl:\n # browser.setUrl(f'qurl'+'.com')\n # i = self.tabs.addTab(browser, label)\n # self.tabs.setCurrentIndex(i)\n\n # More difficult! We only want to update the url when it's from the\n # correct tab\n browser.urlChanged.connect(lambda qurl, browser=browser:\n self.update_urlbar(qurl, browser))\n\n browser.loadFinished.connect(lambda _, i=i, browser=browser:\n self.tabs.setTabText(i, browser.page().title()))\n # browser.QNetworkReply\n # browser.loadFinished.disconnect(lambda _, i=i, browser=browser:\n # self.tabs.setTabText(i, browser.page().title()))\n\n def tab_open_doubleclick(self, i):\n if i == -1: # No tab under the click\n self.add_new_tab()\n\n def current_tab_changed(self, i):\n qurl = self.tabs.currentWidget().url()\n self.update_urlbar(qurl, self.tabs.currentWidget())\n self.update_title(self.tabs.currentWidget())\n\n def close_current_tab(self, i):\n if self.tabs.count() < 2:\n return\n\n self.tabs.removeTab(i)\n\n # def close_tabs(self, nub):\n # self.tabs.removeTab(nub)\n\n def update_title(self, browser):\n if browser != self.tabs.currentWidget():\n # If this signal is not from the current tab, ignore\n return\n\n title = self.tabs.currentWidget().page().title()\n self.setWindowTitle(\"%s - Mozilla ITLA\" % title)\n\n def navigate_mozarella(self):\n self.tabs.currentWidget().setUrl(QUrl(\"https://youtube.com\"))\n\n def about(self):\n dlg = AboutDialog()\n dlg.exec_()\n\n def open_file(self):\n filename, _ = QFileDialog.getOpenFileName(self, \"Abrir Archivos\", \"\",\n \"Hypertext Markup Language (*.htm *.html);;\"\n \"All files (*.*)\")\n\n if filename:\n with open(filename, 'r') as f:\n html = f.read()\n\n self.tabs.currentWidget().setHtml(html)\n self.urlbar.setText(filename)\n\n def save_file(self):\n filename, _ = QFileDialog.getSaveFileName(self, \"Guarda Pagina como\",\n \"Hypertext Markup Language (*.htm *html);;\"\n \"All files (*.*)\")\n\n if filename:\n html = self.tabs.currentWidget().page().toHtml()\n with open(filename, 'w') as f:\n f.write(html.encode('utf8'))\n\n def print_page(self):\n dlg = QPrintPreviewDialog()\n dlg.paintRequested.connect(self.browser.print_)\n dlg.exec_()\n\n def navigate_home(self):\n\n self.tabs.currentWidget().setUrl(QUrl.fromPercentEncoding(\"http://www.google.com\"))\n\n # def closeApp(self):\n # self.tabs. 
setTabsClosable()\n # # app.quit()\n\n def navigate_to_url(self): # Does not receive the Url\n q = QUrl(self.urlbar.text())\n if q.scheme() == \"\":\n q.setScheme(\"http\")\n\n self.tabs.currentWidget().setUrl(q)\n\n def update_urlbar(self, q, browser=None):\n\n if browser != self.tabs.currentWidget():\n # If this signal is not from the current tab, ignore\n return\n\n if q.scheme() == 'https':\n # Secure padlock icon\n self.httpsicon.setPixmap(\n QPixmap(os.path.join('images', 'lock-ssl.png')))\n\n else:\n # Insecure padlock icon\n self.httpsicon.setPixmap(\n QPixmap(os.path.join('images', 'lock-nossl.png')))\n\n self.urlbar.setText(q.toString())\n self.urlbar.setCursorPosition(0)\n\n\n# EJecutacion de la apliacaicon\napp = QApplication(sys.argv)\napp.setApplicationName(\"Mozilla ITLA\")\napp.setOrganizationName(\"Mozilla ITLA\")\napp.setOrganizationDomain(\"Mozilla ITLA\")\n\nwindow = MainWindow()\n\napp.exec_()\n","repo_name":"uppy19d0/web_browser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18973688525","text":"import random\nimport os\nimport time\n\n\ndef initField(filedLen):\n if filedLen > 10 or filedLen <=2:\n print(r'Размер поля не должен быть больше 10 и меньше 2\\nЭто очевидно! В наказание, будет тебе маленькое поле, играй так.')\n filedLen = 2\n field = []\n \n for i in range(0,filedLen):\n elem = []\n for k in range(0,filedLen):\n elem.append(0)\n field.append(elem)\n\n def choseItem(filedLen):\n stats = []\n startItems = [2,2,2,2,2,2,2,2,2,4]\n\n for i in range(0,filedLen):\n stats.append([random.randint(0,filedLen-1),\n random.randint(0,filedLen-1),\n startItems[random.randint(0,9)]])\n return stats\n\t\n stats = choseItem(filedLen)\n\n for val in stats:\n field[val[0]][val[1]] = val[2]\n\n return field\n\n\n\n\ndef vertical(arr, direction):\n for i in range(len(arr)):\n preArr = []\n \n for item in arr:\n preArr.append(item[i])\n \n while preArr.count(0) != 0:\n preArr.remove(0)\n \n if direction.lower() == 'down': \n preArr = preArr[::-1]\n \n for key in range(len(preArr)):\n if (key+1) < len(preArr):\n if preArr[key] == preArr[key+1]:\n preArr[key] = int(preArr[key])*2\n preArr.pop(key+1)\n else:\n continue\n else:\n break \n \n while len(preArr) != len(arr):\n preArr.append(0) \n \n if direction.lower() == 'down': \n preArr = preArr[::-1] \n \n for key, value in enumerate(preArr):\n arr[key][i] = value\n \n arr, cont, state = nextStep(arr)\n print(cont) \n return arr\n\n\n\n\ndef horizontal(arr,direction):\n for i, elem in enumerate(arr):\n while elem.count(0) != 0:\n elem.remove(0)\n \n if direction.lower() == 'right': \n elem = elem[::-1]\n \n for key in range(len(elem)):\n if (key+1) < len(elem):\n if elem[key] == elem[key+1]:\n elem[key] = int(elem[key])*2\n elem.pop(key+1)\n else:\n continue\n else:\n break\n \n while len(elem) != len(arr):\n elem.append(0)\n \n if direction.lower() == 'right': \n elem = elem[::-1]\n \n arr[i] = elem\n \n arr, cont, state = nextStep(arr)\n print(cont) \n return arr\n \n \ndef nextStep(arr):\n coords = []\n for key, value in enumerate(arr):\n while value.count(0) != 0:\n index = value.index(0)\n data = str(key)+'.'+str(index) \n coords.append(data)\n value[index] = '*'\n while value.count('*') != 0:\n index = value.index('*')\n value[index] = 0\n if len(coords) != 0: \n index = random.choice(coords)\n coordX = int(index[0])\n coordY = int(index[2])\n arr[coordX][coordY] = 2\n game = 
'Game Must Go On'\n        state = True\n        return arr, game, state\n    else:\n        game = 'Game Over'\n        state = False\n        return arr, game, state\n\ndef scors(arr):\n    scors = 0\n    for i in arr:\n        for k in i:\n            scors = scors + k\n    return int(scors) \n\n\ndef firstOpen(fileForFile):\n    os.system(['clear', 'cls'][os.name == 'nt'])\n\n    with open(fileForFile,'r') as file:\n        for line in file:\n            print(line, end = '\\n')\n    time.sleep(3)\n    os.system(['clear', 'cls'][os.name == 'nt'])\n\n    \n\nif __name__ == '__main__':\n\n    firstOpen(r'res/load.txt')\n\n    field = initField(int(input('Размер поля: ')))\n\n    for i in field:\n        print(i) \n    \n    while True:\n        print(\"Количество очков:\", scors(field), sep=' ', end=' ')\n        step = input('Ход: ')\n        os.system(['clear', 'cls'][os.name == 'nt'])\n        if step == 'w':\n            for i in vertical(field, 'up'): \n                print(i)\n        elif step == 's':\n            for i in vertical(field, 'down'): \n                print(i)\n        elif step == 'a':\n            for i in horizontal(field,'left'): \n                print(i)\n        elif step == 'd':\n            for i in horizontal(field,'right'): \n                print(i)\n        else:\n            break\n    \n    \n    \n    \n    \n    \n    \n\n    \n    \n    ","repo_name":"alekseyfastovets/Python-2048-console-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"35103921229","text":"from tkinter 
import *\nimport tkinter.messagebox as tm\nimport db2\n\nroot = Tk()\nroot.title(\"Home \")\nroot.resizable(False, False)\n\nlogo = PhotoImage(file=\"download.png\")\nlogo2 = PhotoImage(file=\"giphy.gif\") \n\nlogo1 = PhotoImage(file=\"giphy.gif\")\nroot.label_Logo = Label(root, image=logo1)\nroot.label_Logo.pack()\n\n\nclass LoginFrame(Frame):\n def __init__(self, master):\n super().__init__(master)\n\n self.logbtn = Button(self, text=\"Buying\", command=self._login_btn_clickked_1,width=30,bg=\"#32c3ff\",\n fg=\"black\",font =('arial', 16 , 'bold'),bd=3)\n self.logbtn.grid(columnspan=2)\n self.logbtn = Button(self, text=\"Update User\", command=self._login_btn_clickked_2,width=32,bg=\"#32c3ff\",\n fg=\"black\",font =('arial', 16 , 'bold'),bd=3)\n self.logbtn.grid(columnspan=2)\n self.logbtn = Button(self, text=\"Delete User\", command=self._login_btn_clickked_3,width=34,bg=\"#32c3ff\",\n fg=\"black\",font =('arial', 16 , 'bold'),bd=3)\n self.logbtn.grid(columnspan=2)\n self.logbtn = Button(self, text=\"Add User\", command=self._login_btn_clickked_4,width=36,bg=\"#32c3ff\",\n fg=\"black\",font =('arial', 16 , 'bold'),bd=3)\n self.logbtn.grid(columnspan=2)\n self.logbtn = Button(self, text=\"Menu\", command=self._login_btn_clickked_5,width=38,bg=\"#32c3ff\",\n fg=\"black\",font =('arial', 16 , 'bold'),bd=3)\n self.logbtn.grid(columnspan=2)\n self.logbtn = Button(self, text=\"Total price\", command=self._login_btn_clickked_6,width=40,bg=\"#32c3ff\",\n fg=\"black\",font =('arial', 16 , 'bold'),bd=3)\n self.logbtn.grid(columnspan=2)\n self.logbtn = Button(self, text=\"Update Meals\", command=self._login_btn_clickked_7,width=42,bg=\"#32c3ff\",\n fg=\"black\",font =('arial', 16 , 'bold'),bd=3)\n self.logbtn.grid(columnspan=2)\n self.pack()\n\n def _login_btn_clickked_1(self):\n root.destroy()\n import Python;\n\n def _login_btn_clickked_2(self):\n root.destroy()\n import update;\n\n def _login_btn_clickked_3(self):\n root.destroy()\n import delete;\n\n def _login_btn_clickked_4(self):\n root.destroy()\n import insert_user;\n\n def _login_btn_clickked_5(self):\n root.destroy()\n import Sectect_meales;\n\n def _login_btn_clickked_6(self):\n root.destroy()\n import Sectect_TotalPrices;\n\n def _login_btn_clickked_7(self):\n root.destroy()\n import update_Meals_Sal;\n\n\nlf = LoginFrame(root)\nroot.mainloop()\n","repo_name":"abedeidgithub/Python-Project","sub_path":"home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73862504266","text":"\"\"\"main file tha launches fastapi and created hdhr instance\"\"\"\nfrom typing import Any, List\nfrom fastapi import FastAPI, Request\nfrom fastapi.exceptions import HTTPException\nfrom fastapi.responses import StreamingResponse\nfrom hd_home_run import HdHomeRun\nfrom threading import Thread\nimport uvicorn\nfrom pydantic import BaseModel\nimport json\nimport re\nimport os\nimport sys\n\n# These must be declared in the docker or shell environment\nHDHR_IP = os.getenv(\"HDHR_IP\")\nHOST_IP = os.getenv(\"HOST_IP\")\nif HDHR_IP is None or HOST_IP is None:\n print(\"ERROR: missing HDHR_IP or HOSTIP\")\n sys.exit(1)\n\n# These config options are optionl\n# Set to 1 to reverse the DeviceID of the original HDHR.\n# This is needed for some systems like PLEX that track the DeviceID\ntry:\n DeviceID_swap = int(os.getenv(\"DEVICEID_SWAP\"))\nexcept:\n print(\"WARN: Default DeviceID_swap = 0\")\n DeviceID_swap = 0\n\napp = FastAPI()\ntune = 
FastAPI()\nhdhr_instance = HdHomeRun(HDHR_IP)\n\n\n@app.get(\"/\")\nasync def get_info():\n return {\n \"application\": \"hdhr-ac4\",\n \"version\": \"1.5.0\",\n \"website\": \"https://github.com/johnb-7/hdhr-ac4\",\n }\n\n\n@app.get(\"/discover.json\")\nasync def get_discover():\n original = hdhr_instance.discover()\n modified = original.replace(HDHR_IP, HOST_IP)\n\n if DeviceID_swap:\n DID_search = re.search(r'\"DeviceID\":\"([A-F0-9]+)\"', modified)\n if DID_search:\n modified = re.sub(\n r'\"DeviceID\":\"([A-F0-9]+)\"',\n r'\"DeviceID\":\"' + DID_search.group(1)[::-1] + '\"',\n modified,\n )\n return json.loads(modified)\n\n\n@app.get(\"/lineup.json\")\nasync def get_lineup():\n original_txt = hdhr_instance.lineup()\n modified_txt = original_txt.replace(HDHR_IP, HOST_IP).replace(\n '\"ATSC3\":1', '\"AudioCodec\":\"AC3\"'\n )\n original_json = json.loads(modified_txt)\n modified_json = []\n for entry in original_json:\n if \"VideoCodec\" in entry and entry[\"VideoCodec\"] == \"HEVC\":\n print(entry)\n modified_json.append(entry)\n return modified_json\n\n\n@app.get(\"/lineup_status.json\")\nasync def get_lineup_status():\n original_json = hdhr_instance.lineup_status()\n return json.loads(original_json)\n\n\n@tune.get(\"/auto/{channel}\")\nasync def in_channel(channel: str, request: Request) -> Any:\n return hdhr_instance.tune(channel, request)\n\n\nif __name__ == \"__main__\":\n # only for dev, prod runs through uvicorn command line\n app_thread = Thread(\n target=uvicorn.run, kwargs={\"app\": app, \"port\": 80, \"host\": \"0.0.0.0\"}\n )\n tune_thread = Thread(\n target=uvicorn.run, kwargs={\"app\": tune, \"port\": 5004, \"host\": \"0.0.0.0\"}\n )\n app_thread.start()\n tune_thread.start()\n app_thread.join()\n tune_thread.join()\n","repo_name":"johnb-7/hdhr-ac4","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"} +{"seq_id":"72849559624","text":"from orator.migrations import Migration\n\n\nclass CreatePermissionsRolesPivotTable(Migration):\n def up(self):\n \"\"\"\n Run the migrations.\n \"\"\"\n with self.schema.create(\"permissions_roles\") as table:\n table.big_integer(\"permission_id\")\n table.foreign(\"permission_id\").references(\"id\").on(\"permissions\").on_delete(\n \"cascade\"\n )\n table.big_integer(\"role_id\")\n table.foreign(\"role_id\").references(\"id\").on(\"roles\").on_delete(\"cascade\")\n\n # Attach permissions to administrator role\n role_administrator = (\n self.db.table(\"roles\").where(\"name\", \"administrator\").first()\n )\n\n permission_administrate = (\n self.db.table(\"permissions\").where(\"name\", \"administrate\").first()\n )\n\n self.db.table(\"permissions_roles\").insert(\n [\n {\n \"permission_id\": permission_administrate.id,\n \"role_id\": role_administrator.id,\n }\n ]\n )\n\n # Attach permissions to user role\n role_user = self.db.table(\"roles\").where(\"name\", \"user\").first()\n\n permission_create_blog = (\n self.db.table(\"permissions\").where(\"name\", \"create_blog\").first()\n )\n permission_create_reference = (\n self.db.table(\"permissions\").where(\"name\", \"create_reference\").first()\n )\n permission_create_course = (\n self.db.table(\"permissions\").where(\"name\", \"create_course\").first()\n )\n permission_create_syllabus = (\n self.db.table(\"permissions\").where(\"name\", \"create_syllabus\").first()\n )\n\n self.db.table(\"permissions_roles\").insert(\n [\n {\"permission_id\": 
permission_create_blog.id, \"role_id\": role_user.id},\n {\n \"permission_id\": permission_create_reference.id,\n \"role_id\": role_user.id,\n },\n {\"permission_id\": permission_create_course.id, \"role_id\": role_user.id},\n {\n \"permission_id\": permission_create_syllabus.id,\n \"role_id\": role_user.id,\n },\n ]\n )\n\n # Attach permissions to premium role.\n role_premium_user = self.db.table(\"roles\").where(\"name\", \"premium_user\").first()\n\n permission_use_domain = (\n self.db.table(\"permissions\").where(\"name\", \"use_domain\").first()\n )\n\n self.db.table(\"permissions_roles\").insert(\n [\n {\n \"permission_id\": permission_use_domain.id,\n \"role_id\": role_premium_user.id,\n }\n ]\n )\n\n def down(self):\n \"\"\"\n Revert the migrations.\n \"\"\"\n self.schema.drop(\"permissions_roles\")\n","repo_name":"erikwestlund/zuhanden","sub_path":"databases/migrations/2019_11_03_141323_create_permissions_roles_pivot_table.py","file_name":"2019_11_03_141323_create_permissions_roles_pivot_table.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"35122931280","text":"import uuid\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.auth.models import Group, Permission\nfrom django.db import models\n\nfrom django.contrib.auth.models import AbstractUser\nfrom school.models import Schools\n\nfrom .managers import CustomUserManager\n\nclass Subject(models.Model):\n id = models.UUIDField(primary_key=True, editable=False, default=uuid.uuid4)\n name = models.CharField(max_length=30)\n active = models.BooleanField(default=True)\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass Marks(models.Model):\n student = models.ForeignKey('student.Student', on_delete=models.CASCADE)\n class_subject = models.ForeignKey('school.ClassSubject', on_delete=models.CASCADE)\n marks = models.FloatField(default=0)\n\n def __str__(self):\n return f'{self.student.name} - {self.class_subject.subject.name}'\n\n\nclass User(AbstractUser):\n designation = models.CharField(max_length=30)\n school = models.ForeignKey(Schools, on_delete=models.CASCADE, blank=True, null=True)\n active = models.BooleanField(default=True)\n\n groups = models.ManyToManyField(Group, blank=True, related_name='main_users')\n user_permissions = models.ManyToManyField(\n Permission,\n blank=True,\n related_name='main_users',\n verbose_name=_('user permissions'),\n help_text=_(\n 'Specific permissions for this user.'\n 'Note: This only affects permissions checked by the \"has_permission\" method on models.'\n ),\n )\n\n objects = CustomUserManager()\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n phone_number = models.CharField(max_length=20)\n address = models.CharField(max_length=200)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n\n class Meta:\n verbose_name_plural = 'Employee'\n\n\nclass ClassSubject(models.Model):\n title = models.CharField(max_length=50)\n grade = models.ForeignKey('Class.Class', on_delete=models.CASCADE)\n employee = models.ForeignKey(Employee, on_delete=models.CASCADE, related_name='class_subjects')\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name_plural = 
'ClassSubject'\n","repo_name":"WajahatKanju/LMS","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72447901064","text":"from parse import parse_blogtext\nfrom form_embeddings import form_embeddings\nfrom procrustes_align import smart_procrustes_align_gensim\nfrom align_embeddings import align_embeddings\nfrom top_differences import get_top_differences, get_differences_for_word\nfrom itertools import combinations\n\nuse_sg=1\n\nshould_parse_blogtext = True\nshould_form_embeddings = True\nshould_align_embeddings = True\nshould_find_top_cosine_differences = True\nshould_find_differences_for_word = True\n\nfirst_age_partition = 0\nsecond_age_partition = 2\n\nif should_parse_blogtext:\n print(\"Parsing blog text\")\n parse_blogtext()\n\nif should_form_embeddings:\n print(\"Forming embeddings\")\n form_embeddings(use_sg=use_sg)\n\nif should_align_embeddings:\n print(\"Aligning embeddings\")\n for firstindex, secondindex in combinations(range(3), 2):\n align_embeddings(firstindex, secondindex, use_sg=use_sg)\n\nif should_find_top_cosine_differences:\n print(\"Getting top differences\")\n get_top_differences(first_age_partition, second_age_partition, use_sg)\n\nif should_find_differences_for_word:\n print(\"Finding differences for a given word:\")\n word = input(\"What word would you like to find differences for?\")\n get_differences_for_word(first_age_partition, second_age_partition, use_sg=use_sg, word_to_find=word)\n","repo_name":"dem1995/age-embeddings","sub_path":"main_for_prof.py","file_name":"main_for_prof.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2035057208","text":"import gspread, datetime\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\ncreds = ServiceAccountCredentials.from_json_keyfile_name('client-secrets.json', scope)\nclient = gspread.authorize(creds)\n\nsheet = client.open(\"Shot logger\").sheet1\n\n\n\ndt = datetime.datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\ntoday = datetime.datetime.now().strftime(\"%Y/%m/%d\")\n\nsheet.append_row([dt, \"test\"])\n\n\nentries = sheet.get_all_records()\n\nshotcounter = 0\nfor entry in entries:\n if (entry[\"datetime\"][0:10] == today):\n shotcounter += 1\n\nprint (shotcounter)\n\n","repo_name":"nicksmadscience/adhd-lifehacks","sub_path":"sheet-test.py","file_name":"sheet-test.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32505131056","text":"from typing import Any\nfrom unittest.mock import MagicMock, call, patch\n\nimport pytest\nfrom scrapy import Selector\nfrom scrapy.http import Response\n\nfrom web_scraper.spiders import DaftSaleUsedSpider, DaftExtractor, ExtractorException, DAFT_ADDRESS, \\\n PROPERTIES_FOR_SALE, PROPERTY_CARD_SELECTOR, PROPERTY_TYPE_SELECTOR, PRICE_SELECTOR, \\\n FLOOR_AREA_SELECTOR, MAIN_ADDRESS_SELECTOR, STREET_VIEW_SELECTOR, DESCRIPTION_SELECTOR, STATISTICS_SELECTOR, \\\n BER_RATING_ALT_SELECTOR, BEDS_SELECTOR, BATHS_SELECTOR, PAGE_SIZE, DEFAULT_PAGE_SIZE, IRELAND_AREA\n\nNEXT_PAGE_FULL_ADDRESS = 'NEXT_PAGE_FULL_ADDRESS'\n\nNEXT_PAGE_ADDRESS = 'next_page'\n\nPROPERTY_TYPE = 'Property type'\nPROPERTY_TYPE_RAW = '\\n Property type\\n 
'\n\nBER_RATING = 'A1'\nBER_RATING_RAW_EXEMPT = 'SI_666'\nBER_RATING_EXEMPT = None\n\nPRICE_RAW = '€375,000'\nPRICE = 375000\nPRICE_BIG_RAW = '€2,750,000'\nPRICE_BIG = 2750000\nPRICE_NO_VALUE_RAW = '\\n'\nPRICE_NO_VALUE = None\nPRICE_INVALID = None\nPRICE_INVALID_RAW = 'INVALID'\nPRICE_ON_APPLICATION_RAW = 'Price On Application'\nPRICE_INVALID_EXCEPTION_MSG = r\"Error .*pars.*price.*:'INVALID'\"\n\nBEDROOMS_RAW = '3 Bed'\nBATHROOMS_RAW = '2 Bath'\nBEDROOMS = 3\nBEDROOMS_NO_VALUE = None\nBATHROOMS = 2\nBATHROOMS_NO_VALUE = None\n\nFLOOR_AREA_RAW = '54 m²'\nFLOOR_AREA = 54\nFLOOR_AREA_DECIMAL_RAW = '50.3 m²'\nFLOOR_AREA_DECIMAL = 50.3\n\nMAIN_ADDRESS_RAW = \"Apartment 12, Stewart Hall, Ryder's Row, Dublin 1, Dublin City Centre\"\nMAIN_ADDRESS = \"Apartment 12, Stewart Hall, Ryder's Row, Dublin 1, Dublin City Centre\"\nMAIN_ADDRESS_SINGLE_RAW = 'SingleAdress'\nMAIN_ADDRESS_NO_DISTRICT_DUN_LAOGHAIRE = '7 Crofton Terrace, Dun Laoghaire, South Co. Dublin'\nMAIN_ADDRESS_NO_DISTRICT_MALAHIDE = '44 The Walk, Robswalls, Malahide, North Co. Dublin'\nMAIN_ADDRESS_SECTOR = 'Dublin City Centre'\nMAIN_ADDRESS_NO_DISTRICT_DUN_LAOGHAIRE_SECTOR = 'South Co. Dublin'\nMAIN_ADDRESS_NO_DISTRICT_MALAHIDE_SECTOR = 'North Co. Dublin'\nMAIN_ADDRESS_DISTRICT = 'Dublin 1'\nMAIN_ADDRESS_REGION = \"Ryder's Row\"\nMAIN_ADDRESS_NO_DISTRICT_DUN_LAOGHAIRE_REGION = 'Dun Laoghaire'\nMAIN_ADDRESS_NO_DISTRICT_MALAHIDE_REGION = 'Malahide'\n\nEIR_CODE_RAW = ['\\n ', ' D04\\xa0TD53\\n ']\nEIR_CODE = 'D04-TD53'\nEIR_CODE_NO_VALUE_RAW = []\nEIR_CODE_NO_VALUE = None\n\nGEOLOCATION_RAW = 'https://www.google.com/maps/@?api=1&map_action=pano&viewpoint=53.329523,-6.278734'\nGEOLOCATION = '53.329523,-6.278734'\n\nDESCRIPTION_RAW = [\"\\n A wonderful opportunity \", '\\n',\n '\\nThis attractive double ', \", Palmerston Park, local \",\n '\\nSide entrance\\n ',\n 'Make it stand out and get up to 15x more views']\nDESCRIPTION = \"A wonderful opportunity This attractive double , Palmerston Park,\" \\\n \" local Side entrance Make it stand out and get up to 15x more views\"\n\nSTATISTICS_RAW = ['19.04.2020', '2914']\nUPDATED_AT = '2020-04-19'\nVIEWS = 2914\n\nURL = 'url'\n\nSHORT_LINK_1 = '/link_1'\nSHORT_LINK_2 = '/link_2'\n\nFULL_LINK_1 = DAFT_ADDRESS + SHORT_LINK_1\nFULL_LINK_2 = DAFT_ADDRESS + SHORT_LINK_2\n\nINITIAL_URL = DAFT_ADDRESS + PROPERTIES_FOR_SALE\nINITIAL_ARGS = f\"{PAGE_SIZE}={DEFAULT_PAGE_SIZE}&\"\nINITIAL_AREA1 = \"dublin-2-dublin\"\nINITIAL_AREA2 = \"dublin-3-dublin\"\nINITIAL_AREAS_ARG = \"location=dublin-2-dublin&location=dublin-3-dublin&\"\nMIN_PRICE = 1000\nMAX_PRICE = 1000000\nMIN_AND_MAX_PRICE_URL = 'salePrice_from=1000&salePrice_to=1000000&'\nMIN_BEDS = 2\nMAX_BEDS = 4\nMIN_AND_MAX_BEDS_URL = 'numBeds_from=2&numBeds_to=4&'\n\n\n@pytest.fixture\ndef daft_sale_used():\n return DaftSaleUsedSpider()\n\n\n@pytest.fixture\ndef response():\n result = Response(URL)\n result.request = Response(URL)\n return result\n\n\n@pytest.mark.parametrize(\"spider, expected_start_url\", [\n (DaftSaleUsedSpider(),\n INITIAL_URL + \"/\" + IRELAND_AREA + \"?\" + INITIAL_ARGS),\n (DaftSaleUsedSpider(locations=[INITIAL_AREA1]),\n INITIAL_URL + \"/\" + INITIAL_AREA1 + \"?\" + INITIAL_ARGS),\n (DaftSaleUsedSpider(locations=[INITIAL_AREA1, INITIAL_AREA2]),\n INITIAL_URL + \"/\" + IRELAND_AREA + \"?\" + INITIAL_AREAS_ARG + INITIAL_ARGS),\n (DaftSaleUsedSpider(min_price=MIN_PRICE, max_price=MAX_PRICE),\n INITIAL_URL + \"/\" + IRELAND_AREA + \"?\" + INITIAL_ARGS + MIN_AND_MAX_PRICE_URL),\n (DaftSaleUsedSpider(locations=[INITIAL_AREA1, 
INITIAL_AREA2], min_price=MIN_PRICE, max_price=MAX_PRICE),\n INITIAL_URL + \"/\" + IRELAND_AREA + \"?\" + INITIAL_AREAS_ARG + INITIAL_ARGS + MIN_AND_MAX_PRICE_URL),\n (DaftSaleUsedSpider(locations=[INITIAL_AREA1, INITIAL_AREA2], min_beds=MIN_BEDS, max_beds=MAX_BEDS),\n INITIAL_URL + \"/\" + IRELAND_AREA + \"?\" + INITIAL_AREAS_ARG + INITIAL_ARGS + MIN_AND_MAX_BEDS_URL),\n])\ndef test_daft_sale_used_spider_init(spider, expected_start_url):\n assert spider.start_urls == [expected_start_url]\n assert spider.start_from == 0\n\n\n@patch('web_scraper.spiders.Request')\ndef test_daft_sale_should_parse_property_cards(request_mock, daft_sale_used):\n mock_list_response = MagicMock()\n property1 = _generate_selector(SHORT_LINK_1)\n property2 = _generate_selector(SHORT_LINK_2)\n\n mock_list_response.css.side_effect = [[property1, property2], []]\n\n results = _get_results_from_parsing_response(daft_sale_used, mock_list_response)\n assert len(results) == 3\n\n mock_list_response.css.assert_has_calls(\n [call(PROPERTY_CARD_SELECTOR)])\n property1.get.assert_called_once()\n property2.get.assert_called_once()\n\n request_mock.assert_has_calls([call(FULL_LINK_1, callback=daft_sale_used.parse_detailed_page),\n call(FULL_LINK_2, callback=daft_sale_used.parse_detailed_page)])\n\n\n@patch('web_scraper.spiders.Request')\ndef test_daft_sale_should_try_to_parse_next_page(request_mock, daft_sale_used):\n mock_list_response = MagicMock()\n property1 = _generate_selector(SHORT_LINK_1)\n property2 = _generate_selector(SHORT_LINK_2)\n\n mock_list_response.css.side_effect = [[property1, property2], []]\n\n results = _get_results_from_parsing_response(daft_sale_used, mock_list_response)\n assert len(results) == 3\n\n request_mock.assert_has_calls([call(\n daft_sale_used.base_url + daft_sale_used.base_url_args + f\"from={DEFAULT_PAGE_SIZE}&\",\n callback=daft_sale_used.parse)])\n\n # stops on the second page since there is nothing on the page\n results = _get_results_from_parsing_response(daft_sale_used, mock_list_response)\n assert len(results) == 0\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_property_link(extractor, daft_sale_used, response):\n data_generator = daft_sale_used.parse_detailed_page(response)\n\n results = [value for value in data_generator]\n\n assert len(results) == 1\n assert results[0]['link'] == 'url'\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_property_type(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_property_type,\n PROPERTY_TYPE, 'property_type')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_ber_rating(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_ber_rating,\n BER_RATING, 'ber_rating')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_price(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_price,\n PRICE, 'price')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_bedrooms(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_bedrooms,\n BEDROOMS, 'bedrooms')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_bathrooms(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, 
extractor.extract_bathrooms,\n BATHROOMS, 'bathrooms')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_floor_area(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_floor_area,\n FLOOR_AREA, 'floor_area_m2')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_main_address(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_main_address,\n MAIN_ADDRESS, 'main_address')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_sector(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_sector,\n MAIN_ADDRESS_SECTOR, 'sector')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_region(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_region,\n MAIN_ADDRESS_DISTRICT, 'region')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_geolocation(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_geolocation,\n GEOLOCATION, 'geolocation')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_description(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_description,\n DESCRIPTION, 'description')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_updated_at(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_updated_at,\n UPDATED_AT, 'updated_at')\n\n\n@patch('web_scraper.spiders.DaftExtractor')\ndef test_daft_sale_should_parse_the_views(extractor, daft_sale_used, response):\n _assert_extractor_called(daft_sale_used, response, extractor.extract_views,\n VIEWS, 'views')\n\n\ndef test_daft_extractor_should_extract_property_type():\n _assert_parsed_by_extractor(DaftExtractor.extract_property_type, PROPERTY_TYPE_SELECTOR,\n PROPERTY_TYPE_RAW, PROPERTY_TYPE)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (BER_RATING, BER_RATING),\n (BER_RATING_RAW_EXEMPT, BER_RATING_EXEMPT),\n])\ndef test_daft_extractor_should_extract_ber_rating(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_ber_rating,\n BER_RATING_ALT_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value, exception_msg', [\n (PRICE_RAW, PRICE, None),\n (PRICE_BIG_RAW, PRICE_BIG, None),\n (PRICE_NO_VALUE_RAW, PRICE_NO_VALUE, None),\n (PRICE_ON_APPLICATION_RAW, PRICE_NO_VALUE, None),\n (PRICE_INVALID_RAW, PRICE_INVALID, PRICE_INVALID_EXCEPTION_MSG),\n])\ndef test_daft_extractor_should_extract_price(raw_value, expected_value, exception_msg):\n _assert_parsed_by_extractor(DaftExtractor.extract_price,\n PRICE_SELECTOR, raw_value, expected_value, exception_msg)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (BEDROOMS_RAW, BEDROOMS),\n (None, BEDROOMS_NO_VALUE)\n])\ndef test_daft_extractor_should_extract_bed_rooms(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_bedrooms,\n BEDS_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (BATHROOMS_RAW, BATHROOMS),\n (None, BATHROOMS_NO_VALUE),\n])\ndef test_daft_extractor_should_extract_bath_rooms(raw_value, 
expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_bathrooms,\n BATHS_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (FLOOR_AREA_RAW, FLOOR_AREA),\n (FLOOR_AREA_DECIMAL_RAW, FLOOR_AREA_DECIMAL),\n])\ndef test_daft_extractor_should_extract_floor_area(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_floor_area,\n FLOOR_AREA_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (MAIN_ADDRESS_RAW, MAIN_ADDRESS),\n (MAIN_ADDRESS_SINGLE_RAW, MAIN_ADDRESS_SINGLE_RAW),\n])\ndef test_daft_extractor_should_extract_main_address(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_main_address,\n MAIN_ADDRESS_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (MAIN_ADDRESS_RAW, MAIN_ADDRESS_SECTOR),\n (MAIN_ADDRESS_SINGLE_RAW, None),\n (MAIN_ADDRESS_NO_DISTRICT_DUN_LAOGHAIRE, MAIN_ADDRESS_NO_DISTRICT_DUN_LAOGHAIRE_SECTOR),\n (MAIN_ADDRESS_NO_DISTRICT_MALAHIDE, MAIN_ADDRESS_NO_DISTRICT_MALAHIDE_SECTOR),\n])\ndef test_daft_extractor_should_extract_sector(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_sector,\n MAIN_ADDRESS_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (MAIN_ADDRESS_RAW, MAIN_ADDRESS_REGION),\n (MAIN_ADDRESS_SINGLE_RAW, None),\n (MAIN_ADDRESS_NO_DISTRICT_DUN_LAOGHAIRE, MAIN_ADDRESS_NO_DISTRICT_DUN_LAOGHAIRE_REGION),\n (MAIN_ADDRESS_NO_DISTRICT_MALAHIDE, MAIN_ADDRESS_NO_DISTRICT_MALAHIDE_REGION),\n])\ndef test_daft_extractor_should_extract_region(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_region,\n MAIN_ADDRESS_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (GEOLOCATION_RAW, GEOLOCATION),\n])\ndef test_daft_extractor_should_extract_geolocation(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_geolocation,\n STREET_VIEW_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (DESCRIPTION_RAW, DESCRIPTION),\n])\ndef test_daft_extractor_should_extract_description(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_description,\n DESCRIPTION_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (STATISTICS_RAW, UPDATED_AT),\n])\ndef test_daft_extractor_should_extract_updated_at(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_updated_at,\n STATISTICS_SELECTOR, raw_value, expected_value)\n\n\n@pytest.mark.parametrize('raw_value, expected_value', [\n (STATISTICS_RAW, VIEWS),\n])\ndef test_daft_extractor_should_extract_views(raw_value, expected_value):\n _assert_parsed_by_extractor(DaftExtractor.extract_views,\n STATISTICS_SELECTOR, raw_value, expected_value)\n\n\ndef _generate_selector(result: Any):\n response = MagicMock(Selector)\n response.get.return_value = result\n\n return response\n\n\ndef _generate_response_for_css_selector(result: Any):\n response = MagicMock(Response)\n css_result = MagicMock()\n\n css_result.get.return_value = result\n css_result.getall.return_value = result\n response.css.return_value = css_result\n\n return response\n\n\ndef _assert_extractor_called(daft_sale_used, response, extractor_function, extractor_result,\n property_name) -> None:\n extractor_function.side_effect = [extractor_result]\n 
data_generator = daft_sale_used.parse_detailed_page(response)\n\n results = [value for value in data_generator]\n\n assert len(results) == 1\n assert results[0][property_name] == extractor_result\n\n extractor_function.assert_called_once_with(response)\n\n\ndef _assert_parsed_by_extractor(extractor, selector, raw_value, expected_value,\n expected_exception_regex=None):\n response = _generate_response_for_css_selector(raw_value)\n\n if not expected_exception_regex:\n result = extractor(response)\n assert result == expected_value\n else:\n with pytest.raises(ExtractorException, match=expected_exception_regex):\n extractor(response)\n\n response.css.assert_called_once_with(selector)\n\n\ndef _get_results_from_parsing_response(daft_sale_used, mock_list_response):\n detailed_request_generator = daft_sale_used.parse(mock_list_response)\n results = [value for value in detailed_request_generator]\n return results\n","repo_name":"tabelini/web_scraper","sub_path":"tests/web_scraper/test_spiders.py","file_name":"test_spiders.py","file_ext":"py","file_size_in_byte":16792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33199855836","text":"from django.shortcuts import render\nfrom .models import Produto\nfrom .forms import ProdutoForm\nfrom django.views.generic import CreateView\nfrom django.http import HttpResponseRedirect,JsonResponse\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n@login_required\ndef produtos(request):\n sitereferencia='produtos/produtos.html'\n produtos=Produto.objects.all()\n context={'produtos':produtos}\n return render(request,sitereferencia,context)\n\n@login_required\ndef detalhes(request,pk):\n sitereferencia='produtos/produto_detail.html'\n produto=Produto.objects.get(pk=pk)\n context={'produto':produto}\n return render(request,sitereferencia,context)\n\n@login_required\ndef produto_add(request):\n template_name = 'produtos/produto_add.html'\n return render(request,template_name)\n\nclass ProdutoCreate(CreateView):\n model = Produto\n template_name='produtos/produto_add.html'\n form_class=ProdutoForm\n\n@login_required\ndef criar_produto(request):\n meuformulario=ProdutoForm\n if request.method ==\"POST\":\n meuformulario=ProdutoForm(request.POST)\n if meuformulario.is_valid():\n print(meuformulario.cleaned_data)\n Produto.objects.create(**meuformulario.cleaned_data)\n else:\n print(\"Deu Erro\")\n context={\"form\":meuformulario}\n sitereferencia=\"produtos/produto_add.html\"\n return render(request,sitereferencia,context)\n\n@login_required\ndef produto_json(request, pk):\n ''' Retorna o produto, id e estoque. 
'''\n produto = Produto.objects.filter(pk=pk)\n data = [item.to_dict_json() for item in produto]\n return JsonResponse({'data': data})","repo_name":"italorennan/lojita","sub_path":"produtos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4699211597","text":"# This script contains a function 'get_statistics' which takes in a list of numbers\n# and returns a dictionary containing the following statistics about the numbers:\n# the mean, median, mode, sample variance, sample standard deviation,\n# and a 95% confidence interval for the mean.\n\n# This function does not use any libraries and is implemented purely using base Python.\n# It is designed to operate under the assumption of large samples from a population\n# (enough to use a Z-score of 1.96) and a normal distribution.\n\ndef get_statistics(input_list):\n n = len(input_list)\n\n # Calculate mean\n total_sum = sum(input_list)\n mean = total_sum / n\n\n # Calculate median\n sorted_list = sorted(input_list)\n if n % 2 == 0:\n median = (sorted_list[n // 2 - 1] + sorted_list[n // 2]) / 2\n else:\n median = sorted_list[n // 2]\n\n # Calculate mode\n freq_dict = {}\n for num in input_list:\n if num in freq_dict:\n freq_dict[num] += 1\n else:\n freq_dict[num] = 1\n mode = max(freq_dict, key=freq_dict.get)\n\n # Calculate variance and standard deviation\n sum_diff_sq = sum((xi - mean) ** 2 for xi in input_list)\n variance = sum_diff_sq / (n - 1)\n std_dev = variance ** 0.5\n\n # Calculate 95% confidence interval for the mean\n margin_error = 1.96 * (std_dev / (n ** 0.5))\n ci_lower = mean - margin_error\n ci_upper = mean + margin_error\n\n return {\n \"mean\": round(mean, 4),\n \"median\": round(median, 4),\n \"mode\": round(mode, 4),\n \"sample_variance\": round(variance, 4),\n \"sample_standard_deviation\": round(std_dev, 4),\n \"mean_confidence_interval\": [round(ci_lower, 4), round(ci_upper, 4)]\n }\n","repo_name":"MOHYAZZZ/Machine-learning","sub_path":"mathematical_concepts/statistics_calculator.py","file_name":"statistics_calculator.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74195779785","text":"import numpy as np\r\nimport random\r\nimport pytest\r\nimport sys\r\nsys.path.append(\"../\")\r\n\r\nfrom mlforge.perceptron.optimizers import LinearSeparable, Pocket, GradientDescent\r\nfrom mlforge.utils.data_utils import add_cons\r\nfrom mlforge.utils.operation_utils import sign\r\nfrom mlforge.losses import ZeroOneError\r\n\r\n# Fixtures\r\n\r\n@pytest.fixture\r\ndef linear_separable_data():\r\n \"\"\"generate linear Seperable data\"\"\"\r\n np.random.seed(123)\r\n x_test = np.random.rand(30, 2)\r\n w_test = np.random.rand(2)\r\n b_test = np.random.rand(1)\r\n y_test = sign(x_test @ w_test - b_test)\r\n return x_test, y_test\r\n\r\n@pytest.fixture\r\ndef non_linear_separable_data():\r\n \"\"\"generate non-separable data\"\"\"\r\n np.random.seed(123)\r\n x_test = np.random.rand(40, 2)\r\n w_test = np.random.rand(2)\r\n b_test = np.random.rand(1)\r\n margin = np.abs((x_test@w_test-b_test)/np.sqrt(np.sum(w_test**2)+b_test**2)) > 0.03\r\n x_test = x_test[margin, :]\r\n y_test = sign(x_test @ w_test - b_test)\r\n wrong_idx = np.random.randint(0, len(x_test), 1) # One mistake\r\n y_test[wrong_idx] = -1 * y_test[wrong_idx]\r\n return x_test, y_test, wrong_idx\r\n\r\n# Tests\r\n\r\ndef 
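A quick usage sketch for the get_statistics helper above (the printed values come from this sample; with only eight points the Z = 1.96 interval is illustrative only, since the function assumes large samples):

sample = [2, 4, 4, 4, 5, 5, 7, 9]
stats = get_statistics(sample)
print(stats["mean"])    # 5.0
print(stats["mode"])    # 4 (appears three times)
print(stats["mean_confidence_interval"])  # [lower, upper] around 5.0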
test_linear_seperable_correctness(linear_separable_data):\r\n x_test, y_test = linear_separable_data\r\n\r\n w_ans = LinearSeparable().execute(x_test, y_test)\r\n y_pred = sign(add_cons(x_test) @ w_ans)\r\n assert np.array_equal(y_pred, y_test) \r\n\r\n\r\ndef test_pocket_correctness_linsep_data(linear_separable_data):\r\n x_test, y_test = linear_separable_data\r\n\r\n w_ans = Pocket().execute(x_test, y_test, updates=np.Inf)\r\n y_pred = sign(add_cons(x_test) @ w_ans)\r\n assert np.array_equal(y_pred, y_test) \r\n\r\n\r\ndef test_gradient_descent_correctness_linsep_data(linear_separable_data):\r\n x_test, y_test = linear_separable_data\r\n\r\n w_ans = GradientDescent(lr=0.01).execute(x_test, y_test, epochs=np.Inf)\r\n y_pred = sign(add_cons(x_test) @ w_ans)\r\n assert np.array_equal(y_pred, y_test) \r\n\r\n\r\ndef test_pocket_correctness_nonsep_data(non_linear_separable_data):\r\n x_test, y_test, wrong_idx = non_linear_separable_data\r\n\r\n w_ans = Pocket().execute(x_test, y_test, updates=100)\r\n y_pred = sign(add_cons(x_test) @ w_ans)\r\n \r\n assert len(np.where(y_test!=y_pred)[0].tolist()) == 1\r\n assert wrong_idx[0] == np.where(y_test!=y_pred)[0][0]\r\n\r\n\r\ndef test_gradient_descent_correctness_nonsep_data(non_linear_separable_data):\r\n x_test, y_test, wrong_idx = non_linear_separable_data\r\n\r\n w_ans = GradientDescent(lr=0.01).execute(x_test, y_test, epochs=10)\r\n y_pred = sign(add_cons(x_test) @ w_ans)\r\n\r\n assert len(np.where(y_test!=y_pred)[0].tolist()) == 1\r\n assert wrong_idx[0] == np.where(y_test!=y_pred)[0][0]\r\n","repo_name":"kevinkevin556/mlforge","sub_path":"pytest/perceptron/test_perceptron_optimizers.py","file_name":"test_perceptron_optimizers.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15949351034","text":"import numpy as np\nimport pandas as pd\nimport sys\nimport os\n\nfrom qpso_strategy_finder import QPSOStrategyFinder\n\nsys.path.insert(0, '../etf_data')\nfrom etf_data_loader import load_all_data_from_file\n\nimport matplotlib.pyplot as plt\n\nstart_date = '2010-01-01'\nend_date = '2018-04-08'\n\nprefix = 'mil_'\n\n\ndef load_data():\n df_adj_close = load_all_data_from_file(prefix + 'etf_data_adj_close.csv', start_date, end_date)\n ticket_list = load_ticket_list(df_adj_close)\n print('cleaning data...')\n df_adj_close = df_adj_close[ticket_list]\n print('Backfill of data for NaNs...')\n df_adj_close = df_adj_close.fillna(method='bfill')\n print('Cleaning anomalies from data...')\n for ticket in ticket_list:\n pct = df_adj_close[ticket].pct_change()\n for i in range(len(df_adj_close[ticket])):\n if i > 0 and i < len(df_adj_close[ticket]) - 1:\n bound_lower = pct.iloc[i]\n bound_upper = pct.iloc[i + 1]\n # print('%d|%f|%f'%(i,bound_lower,bound_upper))\n if np.abs(bound_lower) > 0.1 and np.abs(bound_upper) > 0.1:\n # print('changing price from %f to %f' %(df_adj_close[ticket].iloc[i], df_adj_close[ticket].iloc[i+1]))\n df_adj_close[ticket].iloc[i] = df_adj_close[ticket].iloc[i + 1]\n\n ticket_list = remove_anomalies_tickets(df_adj_close,ticket_list)\n df_adj_close = df_adj_close[ticket_list]\n\n return ticket_list, df_adj_close\n\n\ndef load_ticket_list(df_adj_close):\n with open('../etf_data/' + prefix + 'etfs.txt', 'r') as fd:\n etf_list = list(fd.read().splitlines())\n print('Creating ticket list...')\n ticket_list = set(etf_list)\n tickets_to_remove = []\n print('Removing bad tickets')\n for ticket in ticket_list:\n if ticket not in 
df_adj_close.keys():\n tickets_to_remove.append(ticket)\n elif len(df_adj_close[ticket].loc[np.isnan(df_adj_close[ticket])]) == len(df_adj_close[ticket]):\n tickets_to_remove.append(ticket)\n\n ticket_list = ticket_list - set(tickets_to_remove)\n ticket_list = list(ticket_list)\n return ticket_list\n\ndef remove_anomalies_tickets(df_adj_close, ticket_list):\n ticket_list = set(ticket_list)\n tickets_to_remove = []\n print('Removing anomalies tickets')\n for ticket in ticket_list:\n if df_adj_close[ticket].pct_change().max() > 0.1:\n tickets_to_remove.append(ticket)\n\n ticket_list = ticket_list - set(tickets_to_remove)\n ticket_list = list(ticket_list)\n return ticket_list\n\n\ndata_filename = prefix + 'data.csv'\n\nif not os.path.isfile(data_filename):\n ticket_list, df_adj_close = load_data()\n df_adj_close.to_csv(data_filename, index=False)\nelse:\n print('Loading data from file %s' % data_filename)\n df_adj_close = pd.read_csv(data_filename)\n ticket_list = df_adj_close.keys()\n print(ticket_list)\n\n# print('Running data smoothing with EWM...')\n# df_adj_close = df_adj_close.ewm(alpha=0.55).mean()\nprint('Computing price changes')\nprice_change = df_adj_close.pct_change()\n\nprint('Running optimisation...')\n\nstrategyFinder = QPSOStrategyFinder(30, 100, m=len(ticket_list))\nbest = strategyFinder.run(price_change)\n\nplt.plot(best.evaluate(price_change))\nplt.show()\n\nselected_tickets = []\nselected_w = []\nfor i in range(len(ticket_list)):\n if best.w[i] > 0.1:\n print('%s:%f' % (ticket_list[i], best.w[i]))\n selected_tickets.append(ticket_list[i])\n selected_w.append(best.w[i])\n\nprint(np.sum(best.w))\nprint(selected_tickets)\nprint(selected_w)\n\nplt.plot(df_adj_close[selected_tickets])\nplt.show()\n\nplt.plot(df_adj_close[selected_tickets].pct_change())\nplt.show()\n","repo_name":"xSakix/qpso_strategy","sub_path":"find_strategy.py","file_name":"find_strategy.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69926322825","text":"'''\n7-3 找列表中最大元素的下标(高教社,《Python编程基础及应用》习题4-7 (10 分)\n输入一个整数列表,找出整数列表中最大元素的下标,如果最大元素的个数超过1,那么请打印输出所有的下标。\n\n输入格式:\n数字1,数字2,数字3,....,数字n\n\n输出格式:\n下标1 下标2 ... 
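The anomaly-cleaning loop in load_data above writes through chained indexing (df_adj_close[ticket].iloc[i] = ...), which pandas reports as SettingWithCopyWarning and may silently ignore. A minimal sketch of the same fix via .loc, assuming the same frame and loop variables:

row = df_adj_close.index[i]
df_adj_close.loc[row, ticket] = df_adj_close[ticket].iloc[i + 1]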
下标k\n\n输入样例:\n3,2,3\n输出样例:\n0\n2\n'''\nalist=list(map(int,input().split(',')))\nmax=alist[0]\nfor i in range(len(alist)):\n if alist[i]>max:\n max=alist[i]\n m=i\nfor j in range(len(alist)):\n if alist[j]==max:\n m=j\n print(j)","repo_name":"aurorg/python","sub_path":"6-3.py","file_name":"6-3.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14729013708","text":"import argparse\nimport json\nimport os\nfrom urllib.parse import urljoin\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom download import download_image, download_txt, parse_book_page, check_for_redirect, get_pages_number\n\n\ndef main():\n books = []\n parser = argparse.ArgumentParser(description='Download books from tululu.org')\n parser.add_argument('--start_page', default=1, type=int)\n parser.add_argument('--end_page', default=get_pages_number(), type=int)\n parser.add_argument('--dest_folder', default=os.getcwd(), type=str,\n help='path to the directory with parsing results')\n parser.add_argument('--skip_imgs', action='store_true', help='do not download images')\n parser.add_argument('--skip_txt', action='store_true', help='do not download books')\n parser.add_argument('--json_path', default=os.getcwd(), type=str,\n help='specify your path to *.json file with results')\n\n args = parser.parse_args()\n for page in range(args.start_page, args.end_page):\n url = f'https://tululu.org/l55/{page}/'\n response = requests.get(url)\n response.raise_for_status()\n soup = BeautifulSoup(response.text, 'lxml')\n\n books_from_page = soup.select('.d_book')\n for book in books_from_page:\n href = book.select_one('a').get('href')\n book_id = href[2:-1]\n url = urljoin('https://tululu.org/', href)\n response = requests.get(url)\n try:\n response.raise_for_status()\n check_for_redirect(response)\n book = parse_book_page(response.text)\n if not args.skip_txt:\n url = 'https://tululu.org/txt.php'\n params = {'id': book_id}\n filename = f'{book_id}.{book[\"title\"]}'\n path = os.path.join(args.dest_folder, 'books/')\n book['file_path'] = download_txt(url, params, filename, path)\n if not args.skip_imgs:\n url = urljoin('https://tululu.org/', book[\"img_src\"])\n download_image(url, os.path.join(args.dest_folder, 'images/'))\n except requests.HTTPError:\n continue\n books.append(book)\n\n with open(os.path.join(args.json_path, 'books.json'), \"w\", encoding='utf-8') as file:\n json.dump(books, file, ensure_ascii=False)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ArtAlexei/lesson_library_parser","sub_path":"parse_tululu_category.py","file_name":"parse_tululu_category.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17063312403","text":"\"\"\"\nYou may have seen many ways and implementations to fill missing values up of your dataset. But most of them may give a high bias. 
\nHere is another efficient way to predict missing values using Gradient Boosting Model where you can predict missing values with \nvalues which may have some missing values too...\n\nCreated on Sun Dec 20 21:28:04 2020\n@author: Rakib Mahmud\n\"\"\"\n#Import All the packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport xgboost as xgb\n#%matplotlib qt\n\n#Import Dataset\ndf = pd.read_csv('AllData.csv') #The dataset used here is confidential till now\nnrows, nnodes = df.shape\n\n##Convert catagorical string data to numeric codes--------------------------------------\n#Select all categorical data resemble string/object datatype\nobj_df = df.select_dtypes(include=['object']).copy()\nobj_df = obj_df.iloc[:,1:]\ncol_nam = obj_df.columns\ndf = df.drop(col_nam, axis = 1) #delete those columns from dataset temporarily\n#Convert all datatype to categorical datatype to help further conversions\nobj_df[col_nam] = obj_df[col_nam].astype('category')\nobj_df.dtypes\n#Now, convert all categorial string values to numeric value\nfor col in col_nam:\n obj_df[col] = obj_df[col].cat.codes\n#Convert back null values to null\nfor ind in obj_df.index:\n for col in col_nam:\n if obj_df[col][ind] == -1:\n obj_df[col][ind] = np.nan \n#Now, concatenate them back\ndataset = pd.concat([df, obj_df], axis=1)\ndataset = dataset.drop(['Name'], axis = 1)\n#Find correlation matrix\ncorrelation = dataset.corr(min_periods=1) \n\n##--------------------------------------------------------------------------------------\n\n\n\n##Predict age to fill up the missing ages-----------------------------------------------\n#Select highly correlated columns with Age\nage_df = dataset[['Weight','Height','Evidence of having Severe Acute respiratory Distress Syndrome'\n ,'Occupation','[Cardiac problem]Chronic diseases','[Respiratory problem]Chronic diseases',\n 'Concurrent risk factors','Age']]\n#Spliting test-train data\ntest_df = age_df[age_df[\"Age\"].isnull()]\nage_df_temp = age_df.dropna(subset = [\"Age\"])\n\ny_train = age_df_temp[\"Age\"]\nX_train = age_df_temp.drop(\"Age\", axis=1)\nX_test = test_df.drop(\"Age\", axis=1)\n#train model to fit dataset and predict missing values from column \"Age\"\nxg_reg = xgb.XGBRegressor(objective ='reg:linear', colsample_bytree = 0.3, learning_rate = 0.1,\n max_depth = 5, alpha = 10, n_estimators = 10)\n#Change column names as it don't support [] or < or , in column's name\nX_train.columns = ['Weight','Height','Evidence of having Severe Acute respiratory Distress Syndrome'\n ,'Occupation','Cardiac problem','Respiratory problem','Concurrent risk factors']\nX_test.columns = ['Weight','Height','Evidence of having Severe Acute respiratory Distress Syndrome'\n ,'Occupation','Cardiac problem','Respiratory problem','Concurrent risk factors']\n\nxg_reg.fit(X_train,y_train)\n\ny_pred = np.floor(xg_reg.predict(X_test))\n\n#replace the missing values with predicted values\nit = 0\nind = 0\nfor ind in dataset.index:\n if np.isnan(dataset['Age'][ind]):\n dataset['Age'][ind] = y_pred[it]\n it = it+1\n##--------------------------------------------------------------------------------------\n","repo_name":"Rakib-Mahmud/Research-on-COVID-19-Data","sub_path":"Fill_missing_withML.py","file_name":"Fill_missing_withML.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2458425463","text":"from fastapi import APIRouter, Request, Depends, HTTPException, Cookie, status\nfrom 
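The same impute-by-prediction pattern as the Age example above, written as a reusable helper sketch; the function and argument names are placeholders, and any regressor with fit/predict (such as the XGBRegressor used above) will do:

def impute_column(df, target, features, model):
    # Split rows by whether the target is known, train on the known rows,
    # then write predictions back into the missing slots.
    known = df[df[target].notna()]
    missing = df[df[target].isna()]
    model.fit(known[features], known[target])
    df.loc[df[target].isna(), target] = model.predict(missing[features])
    return df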
sqlalchemy import or_\nfrom datatables import DataTable\nfrom models.Admin import notifModel\n\n# importing models one by one\nfrom models.Admin.notifModel import Notifications\nfrom models.Admin.supplyModel import Supplies\nfrom models.Admin.requestModel import Request as Req\nfrom models.Admin.returnModel import Return as Ret\n\nfrom models.Admin import notifModel\nfrom schemas.Admin import notifSchema\nfrom database import get_db\nfrom dependencies import get_token\nfrom typing import List, Optional\nfrom sqlalchemy.orm import Session, joinedload\n\n\nrouter = APIRouter(\n prefix='/notifications',\n tags=['notifications'],\n # dependencies=[Depends(get_token)]\n)\n\n#================================ Notifications Table =================================#\n\n# Notifications DataTable\n@router.get('/datatable')\ndef datatable(request: Request, db: Session = Depends(get_db)):\n try:\n def perform_search(queryset, user_input):\n return queryset.filter(\n or_\n (\n Notifications.notification_id.like('%' + user_input + '%'),\n Notifications.description.like('%' + user_input + '%'),\n Notifications.status.like('%' + user_input + '%'),\n Supplies.supply_name.like('%' + user_input + '%'),\n Notifications.request_id('%' + user_input + '%'),\n Notifications.return_id('%' + user_input + '%'),\n Notifications.created_at.like('%' + user_input + '%'),\n Notifications.updated_at.like('%' + user_input + '%'),\n )\n )\n\n table = DataTable(dict(request.query_params), Notifications, db.query(Notifications), \n [\n 'notification_id',\n ('supply_id', 'supply_notif.supply_name'),\n 'request_id',\n 'return_id',\n 'description',\n 'status',\n 'created_at',\n 'updated_at',\n ])\n\n table.searchable(lambda queryset, user_input: perform_search(queryset, user_input))\n \n return table.json()\n except Exception as e:\n print(e)\n\n# GET all Notifications\n@router.get('/')\ndef get_all_notification(db: Session = Depends(get_db)):\n notif = db.query(notifModel.Notifications).options(joinedload(notifModel.Notifications.supply_notif)).all()\n return notif\n\n# GET Notifications by ID\n@router.get('/{notification_id}', response_model=notifSchema.ShowNotification)\ndef get_one_notification(notification_id:str, db: Session = Depends(get_db)):\n notification = db.query(notifModel.Notifications).filter(notifModel.Notifications.notification_id == notification_id).first()\n if not notification:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Notifications with the id {notification_id} is not available\")\n return notification\n\n# CREATE Notifications\n@router.post('/')\ndef create_notification(request: notifSchema.CreateNotification, db: Session = Depends(get_db)):\n to_store = notifModel.Notifications(\n description = request.description,\n status = request.status,\n supply_id = request.supply_id,\n request_id = request.request_id,\n return_id = request.return_id,\n )\n db.add(to_store)\n db.commit()\n return {'message': 'Notification stored successfully.'}\n\n# UPDATE Notifications\n@router.put('/{notification_id}')\ndef update_notification(notification_id: str, notif: notifSchema.UpdateNotification, db: Session = Depends(get_db)): \n if not db.query(notifModel.Notifications).filter(notifModel.Notifications.notification_id == notification_id).update({\n 'status': notif.status,\n }):\n raise HTTPException(404, 'Notifications to update is not found')\n db.commit()\n return {'message': 'Notification updated successfully.'}\n\n# DELETE Notifications\n@router.delete('/{notification_id}')\ndef 
delete_notification(notification_id: str, db: Session = Depends(get_db)):\n if not db.query(notifModel.Notifications).filter(notifModel.Notifications.notification_id == notification_id, notifModel.Notifications.status == \"Resolved\").delete():\n raise HTTPException(404, 'Notifications to delete is not found')\n db.commit()\n return {'message': 'Notification removed successfully.'}\n\n","repo_name":"JOF-Qura/warehousing_system","sub_path":"routes/Admin/notifRoutes.py","file_name":"notifRoutes.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3115802367","text":"from django.contrib.auth.models import User\nfrom rest_framework import generics, permissions, authentication\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom django.contrib.auth.hashers import check_password\n\nfrom .serializers import UserSerializer, RegisterSerializer\n\n\nclass RegisterAPI(generics.CreateAPIView):\n serializer_class = RegisterSerializer\n allowed_methods = ('POST',)\n\n def post(self, request, *args, **kwargs):\n email = request.data['email']\n try:\n check_is_active = User.objects.get(email=email, username=request.data['username'])\n if not check_is_active.is_active:\n check_is_active.is_active = True\n check_is_active.save()\n data = {'username': check_is_active.username,\n 'email': check_is_active.email}\n return Response(data, status=status.HTTP_200_OK)\n else:\n data = {'username': check_is_active.username,\n 'email': check_is_active.email,\n 'message': 'user already exist'}\n return Response(data, status=status.HTTP_200_OK)\n except:\n serializer = RegisterSerializer(data=request.data)\n data = {}\n if serializer.is_valid():\n account = serializer.save()\n data['email'] = account.email\n data['username'] = account.username\n return Response(data, status=status.HTTP_201_CREATED)\n else:\n data = serializer.errors\n return Response(data, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass LoginAPI(ObtainAuthToken):\n permission_classes = (permissions.AllowAny,)\n queryset = User.objects.all()\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data,\n context={'request': request})\n get_user = self.queryset.filter(username=request.data['username']).first()\n if get_user:\n if get_user.is_active is True:\n if check_password(request.data['password'], get_user.password):\n serializer.is_valid(raise_exception=True)\n # user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=get_user)\n return Response({\n 'token': token.key,\n 'user_id': get_user.pk,\n 'email': get_user.email\n })\n else:\n return Response({\n 'message': 'User password not matched'\n })\n else:\n return Response({\n \"error\": \"Inactive user not able to login\",\n })\n else:\n return Response({\n \"error\": \"User does not exist\",\n })\n\n\nclass UpdateIsActiveAPIView(generics.UpdateAPIView):\n authentication_classes = (authentication.TokenAuthentication,)\n permission_classes = (permissions.IsAuthenticated,)\n\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n def update(self, request, *args, **kwargs):\n try:\n user = request.user.id\n instance = self.queryset.get(id=user)\n if instance.is_active is True:\n instance.is_active = False\n instance.save()\n return Response({\"message\": \"user 
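For reference, a sketch of the datatable search helper with uniform .like() filters — the version above invokes the request_id column as if it were a function (the .like call is missing), which would raise at runtime. This assumes every listed column should get the same LIKE pattern:

def perform_search(queryset, user_input):
    pattern = f"%{user_input}%"
    return queryset.filter(or_(
        Notifications.notification_id.like(pattern),
        Notifications.description.like(pattern),
        Notifications.status.like(pattern),
        Notifications.request_id.like(pattern),
        Notifications.return_id.like(pattern),
        Notifications.created_at.like(pattern),
        Notifications.updated_at.like(pattern),
    ))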
successfully deleted\"},\n status=status.HTTP_200_OK)\n else:\n return Response({\"message\": \"user already deleted\"},\n status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n print(e)","repo_name":"SalmanTahir786/auth-module","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24750186546","text":"import requests\nfrom housesspider import settings\nimport pymongo\n'''这里是用来下载包含有户型以及面积信息的图片'''\n\n\ncon = pymongo.MongoClient(host=settings.DATABASE_IP, port=settings.MONGODB_PORT)\ndb = con['lianjia_ershouhouse']\ncoll_namelist = db.list_collection_names()\n\n\ndef reduct_houseid(dbid):\n '''吧mongodb每一个数据的_id还原成house_id'''\n return dbid[dbid.find('a') + 1:]\n\n\ndef get_allPicUrl():\n '''这是一个遍历房屋信息数据库并去除里面房屋户型图片的url的迭代器'''\n for each_coll_name in coll_namelist:\n coll = db[each_coll_name]\n for eachdb_data in coll.find():\n tmp_list = [reduct_houseid(eachdb_data['_id'])]\n for eachurl in eachdb_data[\"detail\"][8]:\n if 'x-se' in eachurl:\n tmp_list.append(eachurl)\n yield tmp_list, eachdb_data, coll\n\n\ndef downer(url, house_id):\n '''下载图片'''\n agent = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'}\n count = 1\n for each_url in url:\n with open('/home/pic/{}a{}.jpg'.format(house_id, count), 'wb') as f:\n f.write(requests.get(url=each_url, headers=agent).content)\n count += 1\n\n\nif __name__ == '__main__':\n '''下载有户型信息的图片用于识别实际使用面积'''\n for eachurldata in get_allPicUrl():\n house_id, url, dbdata, coll = eachurldata[0][0], eachurldata[0][1:], eachurldata[1], eachurldata[2]\n try:\n dbdata['downed']\n except KeyError:\n downer(url=url, house_id=house_id)\n db_id = dbdata['_id']\n coll.update_one({'_id': db_id}, {'$set': {'downed': True}})\n","repo_name":"KennyQiu1941/house","sub_path":"housesspider/down_pic.py","file_name":"down_pic.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18867081227","text":"class Solution:\n def findErrorNums(self, nums: List[int]) -> List[int]:\n dic = {x: 0 for x in range(1,len(nums)+1)}\n for num in nums:\n dic[num]+=1\n answer = [0,0]\n for key in dic:\n if dic[key]==2:\n answer[0] = key\n if dic[key]==0:\n answer[1] = key\n return answer\n","repo_name":"cyan9212/LeetCode","sub_path":"Hash/Set Mismatch.py","file_name":"Set Mismatch.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27128251285","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport matplotlib\nmatplotlib.use('Agg')\nimport cgi\n\nimport faampy.fltcons\nfrom faampy.fltcons.db import DB\nfrom faampy.fltcons.Summary import Summary\nfrom faampy.fltcons.Plot import Plot\n\nfltcons_list = faampy.fltcons.PARAMETERS\nfltcons_list.sort()\n\n#faampy.fltcons.FIGURES_PATH = '/home/axel/.faampy/tmp/'\n\ndirname, filename = os.path.split(os.path.abspath(__file__))\n\nfaampy.fltcons.FIGURES_PATH = os.path.join(dirname, '..', 'img')\n\n\nspacer = \"
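An arithmetic alternative to the dictionary count in findErrorNums above — since nums is drawn from 1..n with one value duplicated and one missing, both fall out of two sums:

def find_error_nums(nums):
    n = len(nums)
    s, s_distinct = sum(nums), sum(set(nums))
    duplicate = s - s_distinct               # the repeat is counted twice in s
    missing = n * (n + 1) // 2 - s_distinct  # expected total minus distinct total
    return [duplicate, missing]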

     

    \"\n\nform_header = \"\"\"
    \n
    \n \n \n \n\n\n\n
    Flight-Constant: \n
     Filter off\n
     
    \n\n\n
    \"\"\"\n\nhtml_header = 'Content-Type: text/html\\n\\n'\n\nhtml_body_header = \"\"\"Flight-Constant-Browser\n\"\"\"\n\nhtml_body_footer = \"\"\"\"\"\"\n\n\n\ndef showForm(fltcons, filtered=None):\n\n if not filtered:\n filtered = False\n filtered_checked_txt = 'checked'\n else:\n filtered_checked_txt = ''\n\n html = \"\"\n opt = \"\"\n if fltcons:\n opt += form_options % (fltcons, fltcons)\n\n for fltcon in fltcons_list:\n opt += form_options % (fltcon, fltcon)\n\n if fltcons:\n fcs = Summary(fltcons, filtered=filtered)\n fcs_txt = \"

    \" + fcs.__str__() + \"

    \"\n\n fcp = faampy.fltcons.Plot.Plot(fltcons)\n fcp.get_data()\n fcp.create()\n filename = os.path.join(faampy.fltcons.FIGURES_PATH, fltcons + '.png')\n fcp.Figure.savefig(filename)\n\n img_url = os.path.join(faampy.fltcons.FIGURES_URL, fltcons + '.png')\n fcs_plot = \"\"\"

    \"\"\" % ('/img'+img_url)\n else:\n fcs_txt = \"\"\n fcs_plot = \"\"\n\n html += html_header + \\\n html_body_header + \\\n form_header + \\\n opt + \\\n form_footer % (filtered_checked_txt) + \\\n fcs_plot + \\\n 6 * spacer + \\\n \"
    \" + \\\n fcs_txt + \\\n html_body_footer\n print(html)\n\n\ndef process():\n form = cgi.FieldStorage()\n # get flt-constant parameter name\n if form.has_key('cgi_fltcons'):\n fltcons = form['cgi_fltcons'].value\n else:\n fltcons = None\n if form.getvalue('cgi_filtered'):\n filter_value = False\n else:\n filter_value = True\n showForm(fltcons, filtered=filter_value)\n\n\nif __name__ == '__main__':\n process()\n\n","repo_name":"ncasuk/faampy","sub_path":"faampy/fltcons/cgi-fltcons-summary.py","file_name":"cgi-fltcons-summary.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"39040625762","text":"#!/usr/bin/env python3\n\nimport json\nimport turtle\nimport urllib.request\nimport turtle\nimport time\n\n\nurl = \"http://api.open-notify.org/astros.json\"\nresponse = urllib.request.urlopen(url)\nresult = json.loads(response.read())\nprint(\"People in space:\", result['number'])\npeople = result['people']\n\nfor p in people:\n print(p['name'], \"is in\", p['craft'])\n\nurl = \"http://api.open-notify.org/iss-now.json\"\nresponse = urllib.request.urlopen(url)\nresult = json.loads(response.read())\n\nlocation = result['iss_position']\nlat = location['latitude']\nlon = location['longitude']\n\nprint(\"Latitude:\", lat)\nprint(\"Longitude:\", lon)\n\nscreen = turtle.Screen()\nscreen.setup(720, 360)\nscreen.setworldcoordinates(-180, -90, 180, 90)\nscreen.bgpic('map.gif')\n\niss = turtle.Turtle()\nscreen.register_shape('iss2.gif')\niss.shape('iss2.gif')\niss.setheading(90)\niss.penup()\n\niss.goto(float(lon), float(lat))\n\nlat = -23.25\nlon = -46.66\n\nlocation = turtle.Turtle()\nlocation.penup()\nlocation.color('yellow')\nlocation.goto(lon, lat)\nlocation.dot(5)\nlocation.hideturtle()\n\nurl = 'http://api.open-notify.org/iss-pass.json?lat=' + str(lat) + '&lon=' + str(lon)\nresponse = urllib.request.urlopen(url)\nresult = json.loads(response.read())\n\nover = result['response'][1]['risetime']\nlocation.write(time.ctime(over))\n","repo_name":"mrbitsdcf/funwithsun","sub_path":"iss.py","file_name":"iss.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70237195146","text":"import asyncio\nimport re\nfrom urllib.parse import unquote\n\nimport aiohttp\nfrom faker import Faker\nfrom pyppeteer import launch\nfrom tenacity import retry, stop_after_attempt, wait_fixed\n\nfake = Faker('zh_CN')\nimport util\n\n\nclass VideoInfo(object):\n pass\n\n\n@retry(stop=stop_after_attempt(4), wait=wait_fixed(10))\nasync def getVideoInfo91(url):\n try:\n browser, page = await ini_browser()\n await asyncio.wait_for(page.goto(url), timeout=30.0)\n await page._client.send(\"Page.stopLoading\")\n\n await page.waitForSelector('.video-border')\n # 执行JS代码\n # evaluate页面跳转后 已经注入的JS代码会失效\n # evaluateOnNewDocument每次打开新页面注入\n strencode = await page.evaluate('''() => {\n return $(\".video-border\").html().match(/document.write([\\s\\S]*?);/)[1];\n }''')\n\n realM3u8 = await page.evaluate(\" () => {return \" + strencode + \".match(/src='([\\s\\S]*?)'/)[1];}\")\n\n imgUrl = await page.evaluate('''() => {\n return $(\".video-border\").html().match(/poster=\"([\\s\\S]*?)\"/)[1]\n }''')\n scCount = await page.Jeval('#useraction > div:nth-child(1) > span:nth-child(4) > span', 'el => el.innerText')\n title = await page.Jeval('#videodetails > h4', 'el => el.innerText')\n author = await page.Jeval('#videodetails-content > 
div:nth-child(2) > span.title-yakov > a:nth-child(1) > span',\n 'el => el.innerText')\n\n\n finally:\n # 关闭浏览器\n await page.close()\n await browser.close()\n\n videoinfo = VideoInfo()\n videoinfo.title = title\n videoinfo.author = author\n videoinfo.scCount = scCount\n videoinfo.realM3u8 = realM3u8\n videoinfo.imgUrl = imgUrl\n print(title)\n print(realM3u8)\n return videoinfo\n\n\nasync def ini_browser():\n browser = await launch(headless=True, dumpio=True, devtools=False,\n # userDataDir=r'F:\\temporary',\n args=[\n # 关闭受控制提示:比如,Chrome正在受到自动测试软件的控制...\n '--disable-infobars',\n # 取���沙盒模式,沙盒模式下权限太小\n '--no-sandbox',\n '--ignore-certificate-errors',\n '--disable-setuid-sandbox',\n '--disable-features=TranslateUI',\n '-–disable-gpu',\n '--disable-software-rasterizer',\n '--disable-dev-shm-usage',\n # log 等级设置,如果出现一大堆warning,可以不使用默认的日志等级\n '--log-level=3',\n ])\n page = await browser.newPage()\n await page.setUserAgent(fake.user_agent())\n await page.setExtraHTTPHeaders(\n headers={'X-Forwarded-For': await util.genIpaddr(), 'Accept-Language': 'zh-cn,zh;q=0.5'})\n await page.evaluateOnNewDocument('() =>{ Object.defineProperties(navigator,'\n '{ webdriver:{ get: () => false } }) }')\n return browser, page\n\n\n# 首页列表爬取\nasync def page91Index():\n try:\n browser, page = await ini_browser()\n await asyncio.wait_for(page.goto('http://91porn.com/index.php', {'waitUntil': 'networkidle0'}), timeout=30.0)\n await page._client.send(\"Page.stopLoading\")\n await page.waitForSelector('#wrapper > div.container.container-minheight > div.row > div > div > a')\n urls = await page.querySelectorAllEval(\n '#wrapper > div.container.container-minheight > div.row > div > div > div > div > a',\n 'nodes => nodes.map(node => node.href)')\n finally:\n await page.close()\n await browser.close()\n return urls\n\n\nasync def getHs(url):\n async with aiohttp.request(\"GET\", url,\n # proxy='http://127.0.0.1:10809'\n ) as r:\n # print(await r.text())\n text = await r.text()\n urls = re.findall('videoSrc = \\'(.*?)\\'', text)\n titles = re.findall(r'

    (.*?)<', text)\n authors = re.findall(r'作者:', text)\n imgs = re.findall(r'property=\"og:image\" content=\"(.*?)\"', text)\n videoinfo = VideoInfo()\n videoinfo.title = titles[0]\n videoinfo.author =unquote(authors[0])\n videoinfo.realM3u8 = urls[0]\n videoinfo.imgUrl=imgs[0]\n return videoinfo\n","repo_name":"benny3355/pornbot91_py","sub_path":"pyp/page91.py","file_name":"page91.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"18871737524","text":"# Node containing files\n# \tProvides read and write methods and registers with a master server\n\nimport requests\nfrom flask import Flask, request, send_file\nimport os\n\nfile_folder = '~/Documents/fourth-year/distsyst/fileSystem/files/'\nserver_key = 'Mary had a little lamb'\n\napp = Flask(__name__)\napp.config['FILE_FOLDER'] = file_folder\n\n@app.route(\"/write\", methods=['POST'])\ndef write_file():\n\tfile = request.files['file']\n\tfilepath = 'files/{filename}'.format(filename=file.filename)\n\twith open(filepath, 'wb') as open_file:\n\t\topen_file.write(file.read())\n\treturn 'File stored successfully.'\n\n@app.route('/read', methods=['GET'])\ndef read_file():\n\ttry:\n\t\tfilename = request.args.get('filename')\n\t\tfilepath = 'files/{filename}'.format(filename=filename)\n\t\treturn send_file(filepath, attachment_filename=filename)\n\texcept Exception as e:\n\t\treturn str(e)\n\nif __name__ == '__main__':\n\tapp.run()","repo_name":"CiaranCostello/distSyst","sub_path":"readerNode.py","file_name":"readerNode.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25899757960","text":"import cv2\nimport mediapipe as mp\nimport os\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# Khởi tạo đối tượng Mediapipe Hands\nmp_hands = mp.solutions.hands\nhands = mp_hands.Hands(\n static_image_mode=False, max_num_hands=1, min_detection_confidence=0.5\n)\n# static_image_mode: chế độ ảnh tĩnh\n\n\n# Hàm để đọc dữ liệu từ các tệp và thư mục con\ndef load_data_from_folders(data_folder):\n gestures = [] # Danh sách chứa dữ liệu tay\n labels = [] # Danh sách chứa nhãn tương ứng\n\n # Duyệt qua tất cả thư mục trong thư mục dữ liệu\n for folder_name in os.listdir(data_folder):\n folder_path = os.path.join(data_folder, folder_name)\n if os.path.isdir(folder_path):\n # Duyệt qua tất cả tệp tin văn bản trong thư mục con\n for file_name in os.listdir(folder_path):\n if file_name.endswith(\".txt\"):\n file_path = os.path.join(folder_path, file_name)\n with open(file_path, \"r\") as file:\n data = []\n for line in file:\n x, y, z = map(float, line.strip().split())\n data.extend([x, y, z])\n gestures.append(data) # Thêm dữ liệu vào danh sách tay\n labels.append(folder_name) # Thêm nhãn vào danh sách\n\n return np.array(gestures), np.array(labels)\n\n\n# Đường dẫn đến thư mục chứa dữ liệu\ndata_folder = \"hand_shape_data\"\n\n# Đọc dữ liệu từ thư mục và các thư mục con\ngestures, labels = load_data_from_folders(data_folder)\n\n# Chia dữ liệu thành tập huấn luyện và tập kiểm tra\nX_train, X_test, y_train, y_test = train_test_split(\n gestures, labels, test_size=0.3, random_state=42\n)\n\n# Huấn luyện mô hình K-Nearest Neighbors với k=3\nmodel = KNeighborsClassifier(n_neighbors=3)\nmodel.fit(X_train, y_train)\n\n# Dự đoán trên tập kiểm 
tra\ny_pred = model.predict(X_test)\n\naccuracy = accuracy_score(y_test, y_pred)\nprint(f\"Accuracy: {accuracy * 100:.2f}%\")\n\n# Khởi tạo webcam\ncap = cv2.VideoCapture(0)\n\nwhile cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n results = hands.process(frame_rgb)\n\n if results.multi_hand_landmarks:\n for i, hand_landmarks in enumerate(results.multi_hand_landmarks):\n mp_drawing = mp.solutions.drawing_utils\n mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)\n data = []\n for landmark in hand_landmarks.landmark:\n data.extend([landmark.x, landmark.y, landmark.z])\n data = np.array(data).reshape(1, -1)\n\n predicted_gesture = model.predict(data)[0]\n\n cv2.putText(\n frame,\n f\"Gesture: {predicted_gesture}\",\n (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (0, 255, 0),\n 2,\n )\n\n cv2.imshow(\"Hand Gesture Recognition\", frame)\n\n if cv2.waitKey(10) & 0xFF == ord(\"q\"):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"DucTien2003/hand-gesture-recognition","sub_path":"K-Nearest_Neighbors.py","file_name":"K-Nearest_Neighbors.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5017686170","text":"from scraper import scrapeSalary\nimport pandas as pd\n#This houses both of our cleaning commands that we though would make working with the data more manageable.\n\n#This is for salaries\ndef salaryClean(df):\n df.rename(columns={'seasonStartYear': 'SEASON'}, inplace=True)\n df['SEASON'] = df['SEASON'].astype(str).astype(int)\n df['playerName'] = df['playerName'].str.lower()\n\n #We removed the inflatoin adjested salary because the values might constantly change\n #This might give us a better model in the future\n df = df.drop(columns=['inflationAdjSalary'])\n #This would remove most of the rookie contracts or players who just fill space on a team\n df['salary'] = df['salary'].str.slice(start=1)\n df['salary'] = df['salary'].str.replace(\",\", \"\")\n df['salary'] = df['salary'].astype(str).astype(int)\n newDF = df[df[\"salary\"]>1000000]\n newDF = newDF.reset_index()\n return newDF\n\n#This is for player statistics\ndef playerSeasonClean(playerDF):\n playerDF['SEASON']= playerDF['SEASON'].str.slice(stop=4)\n playerDF = playerDF.drop(columns=['TEAM', 'LEAGUE','POS','G', 'GS'])\n playerDF['SEASON'] = pd.to_numeric(playerDF['SEASON'])\n alignedDF = playerDF[playerDF[\"SEASON\"] > 2009]\n alignedDF = playerDF[playerDF[\"SEASON\"] < 2020]\n return alignedDF\n\n","repo_name":"walex42/NBA_Project","sub_path":"data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37761633069","text":"\"\"\"\nThe key to this problem is to recognize the pattern that\n(4n XOR 4n+1 XOR 4n+2 XOR 4n+3) = 0, and the fact that\n0 XOR n = n. 
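In playerSeasonClean above, the second filter is applied to playerDF again, so it overwrites the first and seasons before 2010 slip through. A minimal corrected sketch, with the bounds taken from the surrounding comparisons:

aligned = playerDF[playerDF["SEASON"] > 2009]
aligned = aligned[aligned["SEASON"] < 2020]
# or equivalently, in one step (between() is inclusive):
aligned = playerDF[playerDF["SEASON"].between(2010, 2019)]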
It is therefore only necessary to XOR the first few ids\nuntil we reach a multiple of 4, and then skip to the end of the line and\nXOR a few ids until we reach another multiple of 4.\n\nChecked IDs at line's start may be as far from a multiple of 4 as 3, (4n - 3).\nChecked IDs near line's end may be as far from a multiple of 4 as 2, (4m + 2).\nTherefore we have to XOR as many as 3 IDs from the start of the line,\nand 3 IDs from the end of the line (since we have to XOR 4m, 4m+1, 4m+2), for\na worst case of 6 XOR operations per line. If there are k lines, then this\nalgorithm's time complexity is O(6k) == O(k), so it is linear in time.\nWithout skipping multiples of 4, it would take quadratic time.\n\"\"\"\n\ndef answer(start, length):\n checksum = 0\n id = start\n \n # skip counts the number of IDs we skip at the line end\n for skip in range(length):\n # Keep track of where we started for this line\n start = id\n\n # Number of of IDs we have to check\n checks = length - skip\n\n # XOR line's starting IDs until we reach a\n # multiple of 4\n while (id % 4 != 0):\n checksum = checksum ^ id\n id = id + 1\n \n # Skip to one past the end of the checked IDs\n id = start + checks\n \n # XOR the necessary remaining checked IDs\n while (id % 4 != 0):\n id = id - 1\n checksum = checksum ^ id\n \n # Advance id for the next line\n id = start + length\n return checksum","repo_name":"GavinFrazar/foo.bar","sub_path":"level3/xor_checksum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4049382836","text":"\nfrom bpy.types import Operator\n\nfrom .. import node_util as nu\nfrom .. import blender_texture_characterizer as btc\nfrom .. 
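The 4n identity the docstring leans on is easy to sanity-check, and answer() can be cross-checked against a direct XOR over the same triangle of IDs (row r starts at start + r*length and keeps length - r entries, matching the skip logic above):

for n in range(16):
    assert (4 * n) ^ (4 * n + 1) ^ (4 * n + 2) ^ (4 * n + 3) == 0

def brute_checksum(start, length):
    # XOR every checked ID directly, row by row.
    acc = 0
    for row in range(length):
        first = start + row * length
        for worker_id in range(first, first + length - row):
            acc ^= worker_id
    return acc

assert brute_checksum(17, 4) == answer(17, 4)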
import util as string_util\nfrom ..feature import GroupNodesFeature\nfrom ..string_source import StringSource\n\n\nclass GroupImageSetOperator(Operator, GroupNodesFeature):\n\n bl_idname = \"texture_grapher.group_image_set\"\n bl_label = \"Group Texture Set\"\n bl_space_type = 'NODE_EDITOR'\n bl_region_type = \"UI\"\n bl_category = \"Texture Grapher\"\n\n def execute(self, context):\n node_tree = context.space_data.edit_tree\n nodes = nu.get_selected(node_tree.nodes)\n\n if len(nodes) > 0:\n texture_characterizer = btc.init_characterizer()\n source = string_util.get_best_source(nodes, [StringSource.FILEPATH, StringSource.IMAGE_NAME],\n StringSource.NAME)\n set_char = texture_characterizer.characterize_image_nodes(nodes, source)\n\n set_char.apply_image_settings()\n\n node_input_links = list()\n node_output_links = list()\n for link in node_tree.links:\n for n in nodes:\n for input in n.inputs:\n if link.to_socket == input:\n node_input_links.append((link.from_socket, link.to_socket.name, link.to_node.name))\n for output in n.outputs:\n if link.from_socket == output:\n node_output_links.append((link.from_socket.name, link.to_socket, link.from_node.name))\n\n group_node = set_char.group_nodes(node_tree, nodes=nodes,\n select_group_node=True,\n select_nodes_in_group=True,\n set_group_node_active=True,\n enter_group=False)\n group = group_node.node_tree\n nu.layout_input_output_nodes(group)\n set_char.setup_nodes_in_group(group)\n\n for from_socket, to_socket_name, grouped_node_name in node_input_links:\n grouped_node = group.nodes[grouped_node_name]\n to_socket = grouped_node.inputs[to_socket_name]\n group_node_to_socket = None\n\n for link in group.links:\n if link.to_socket == to_socket and link.from_socket.name in group_node.inputs:\n group_node_to_socket = group_node.inputs[link.from_socket.name]\n break\n\n if group_node_to_socket is not None:\n node_tree.links.new(from_socket, group_node_to_socket)\n\n for from_socket_name, to_socket, grouped_node_name in node_output_links:\n grouped_node = group.nodes[grouped_node_name]\n from_socket = grouped_node.outputs[from_socket_name]\n group_node_from_socket = None\n\n for link in group.links:\n if link.from_socket == from_socket and link.to_socket.name in group_node.outputs:\n group_node_from_socket = group_node.outputs[link.to_socket.name]\n break\n\n if group_node_from_socket is not None:\n node_tree.links.new(group_node_from_socket, to_socket)\n return {'FINISHED'}\n","repo_name":"narranoid/Blender-Texture-Grapher","sub_path":"textureblender/ops/group_image_set.py","file_name":"group_image_set.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5363735748","text":"from typing import Callable\nimport boto3\nimport os\nimport threading\n\n\nsqs = boto3.client('sqs',\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY'),\n aws_secret_access_key=os.getenv('AWS_SECRET_KEY'),\n region_name=os.getenv('AWS_DEFAULT_REGION')\n )\n\nrun_thread = True\n\ndef _listen_queue_messages(on_message: Callable):\n global run_thread\n\n while run_thread:\n # Get message from queue\n queue_response = sqs.receive_message(\n QueueUrl=os.getenv('AWS_QUEUE_URL'),\n WaitTimeSeconds=5\n )\n\n # If message exists, process it\n if queue_response is not None and 'Messages' in queue_response:\n for message in queue_response['Messages']:\n # Call the callback with the message\n on_message(message)\n # Delete the message from the queue\n sqs.delete_message(\n 
QueueUrl=os.getenv('AWS_QUEUE_URL'),\n ReceiptHandle=message['ReceiptHandle']\n )\n\ndef start_listening(on_message: Callable):\n print(\"Starting listener thread...\")\n threading.Thread(target=lambda: _listen_queue_messages(on_message)).start()\n print(\"Listener thread started.\")\n\ndef stop_listening():\n global run_thread\n run_thread = False\n print(\"Listener thread stopped.\")\n","repo_name":"Ninjeneer/CyberEyeOrchestrator","sub_path":"queue_reader.py","file_name":"queue_reader.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72642949066","text":"import os\nimport sys\nimport re\nimport random\n\n__all__ = [ 'KconfigDataError', 'KconfigParserError',\n 'KconfigData', 'KconfigParser' ,\n 'defconfig', 'allyesconfig', 'allnoconfig', 'randconfig' ]\n\ndef debug_print(*args):\n #print('# ' + (' '.join(str(x) for x in args)))\n pass\n\n# -------------------------------------------\n# KconfigData implements the Kconfig semantics. For now it can only\n# detect undefined symbols, i.e. symbols that were referenced in\n# assignments or dependencies but were not declared with \"config FOO\".\n#\n# Semantic actions are represented by methods called do_*. The do_var\n# method return the semantic value of a variable (which right now is\n# just its name).\n# -------------------------------------------\n\nclass KconfigDataError(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\nallyesconfig = lambda x: True\nallnoconfig = lambda x: False\ndefconfig = lambda x: x\nrandconfig = lambda x: random.randint(0, 1) == 1\n\nclass KconfigData:\n class Expr:\n def __and__(self, rhs):\n return KconfigData.AND(self, rhs)\n def __or__(self, rhs):\n return KconfigData.OR(self, rhs)\n def __invert__(self):\n return KconfigData.NOT(self)\n\n # Abstract methods\n def add_edges_to(self, var):\n pass\n def evaluate(self):\n assert False\n\n class AND(Expr):\n def __init__(self, lhs, rhs):\n self.lhs = lhs\n self.rhs = rhs\n def __str__(self):\n return \"(%s && %s)\" % (self.lhs, self.rhs)\n\n def add_edges_to(self, var):\n self.lhs.add_edges_to(var)\n self.rhs.add_edges_to(var)\n def evaluate(self):\n return self.lhs.evaluate() and self.rhs.evaluate()\n\n class OR(Expr):\n def __init__(self, lhs, rhs):\n self.lhs = lhs\n self.rhs = rhs\n def __str__(self):\n return \"(%s || %s)\" % (self.lhs, self.rhs)\n\n def add_edges_to(self, var):\n self.lhs.add_edges_to(var)\n self.rhs.add_edges_to(var)\n def evaluate(self):\n return self.lhs.evaluate() or self.rhs.evaluate()\n\n class NOT(Expr):\n def __init__(self, lhs):\n self.lhs = lhs\n def __str__(self):\n return \"!%s\" % (self.lhs)\n\n def add_edges_to(self, var):\n self.lhs.add_edges_to(var)\n def evaluate(self):\n return not self.lhs.evaluate()\n\n class Var(Expr):\n def __init__(self, name):\n self.name = name\n self.value = None\n self.outgoing = set()\n self.clauses_for_var = list()\n def __str__(self):\n return self.name\n\n def has_value(self):\n return not (self.value is None)\n def set_value(self, val, clause):\n self.clauses_for_var.append(clause)\n if self.has_value() and self.value != val:\n print(\"The following clauses were found for \" + self.name)\n for i in self.clauses_for_var:\n print(\" \" + str(i), file=sys.stderr)\n raise KconfigDataError('contradiction between clauses when setting %s' % self)\n debug_print(\"=> %s is now %s\" % (self.name, val))\n self.value = val\n\n # depth first search of the 
dependency graph\n def dfs(self, visited, f):\n if self in visited:\n return\n visited.add(self)\n for v in self.outgoing:\n v.dfs(visited, f)\n f(self)\n\n def add_edges_to(self, var):\n self.outgoing.add(var)\n def evaluate(self):\n if not self.has_value():\n raise KconfigDataError('cycle found including %s' % self)\n return self.value\n\n class Clause:\n def __init__(self, dest):\n self.dest = dest\n def priority(self):\n return 0\n def process(self):\n pass\n\n class AssignmentClause(Clause):\n def __init__(self, dest, value):\n KconfigData.Clause.__init__(self, dest)\n self.value = value\n def __str__(self):\n return \"CONFIG_%s=%s\" % (self.dest, 'y' if self.value else 'n')\n\n def process(self):\n self.dest.set_value(self.value, self)\n\n class DefaultClause(Clause):\n def __init__(self, dest, value, cond=None):\n KconfigData.Clause.__init__(self, dest)\n self.value = value\n self.cond = cond\n if not (self.cond is None):\n self.cond.add_edges_to(self.dest)\n def __str__(self):\n value = 'y' if self.value else 'n'\n if self.cond is None:\n return \"config %s default %s\" % (self.dest, value)\n else:\n return \"config %s default %s if %s\" % (self.dest, value, self.cond)\n\n def priority(self):\n # Defaults are processed just before leaving the variable\n return -1\n def process(self):\n if not self.dest.has_value() and \\\n (self.cond is None or self.cond.evaluate()):\n self.dest.set_value(self.value, self)\n\n class DependsOnClause(Clause):\n def __init__(self, dest, expr):\n KconfigData.Clause.__init__(self, dest)\n self.expr = expr\n self.expr.add_edges_to(self.dest)\n def __str__(self):\n return \"config %s depends on %s\" % (self.dest, self.expr)\n\n def process(self):\n if not self.expr.evaluate():\n self.dest.set_value(False, self)\n\n class SelectClause(Clause):\n def __init__(self, dest, cond):\n KconfigData.Clause.__init__(self, dest)\n self.cond = cond\n self.cond.add_edges_to(self.dest)\n def __str__(self):\n return \"select %s if %s\" % (self.dest, self.cond)\n\n def process(self):\n if self.cond.evaluate():\n self.dest.set_value(True, self)\n\n def __init__(self, value_mangler=defconfig):\n self.value_mangler = value_mangler\n self.previously_included = []\n self.incl_info = None\n self.defined_vars = set()\n self.referenced_vars = dict()\n self.clauses = list()\n\n # semantic analysis -------------\n\n def check_undefined(self):\n undef = False\n for i in self.referenced_vars:\n if not (i in self.defined_vars):\n print(\"undefined symbol %s\" % (i), file=sys.stderr)\n undef = True\n return undef\n\n def compute_config(self):\n if self.check_undefined():\n raise KconfigDataError(\"there were undefined symbols\")\n return None\n\n debug_print(\"Input:\")\n for clause in self.clauses:\n debug_print(clause)\n\n debug_print(\"\\nDependency graph:\")\n for i in self.referenced_vars:\n debug_print(i, \"->\", [str(x) for x in self.referenced_vars[i].outgoing])\n\n # The reverse of the depth-first order is the topological sort\n dfo = dict()\n visited = set()\n debug_print(\"\\n\")\n def visit_fn(var):\n debug_print(var, \"has DFS number\", len(dfo))\n dfo[var] = len(dfo)\n\n for name, v in self.referenced_vars.items():\n self.do_default(v, False)\n v.dfs(visited, visit_fn)\n\n # Put higher DFS numbers and higher priorities first. 
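(A default clause's priority() returns -1 while every other clause's returns 0, so negating the priority in the sort key below orders defaults after the other clauses for the same variable.)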
This\n # places the clauses in topological order and places defaults\n # after assignments and dependencies.\n self.clauses.sort(key=lambda x: (-dfo[x.dest], -x.priority()))\n\n debug_print(\"\\nSorted clauses:\")\n for clause in self.clauses:\n debug_print(clause)\n clause.process()\n\n debug_print(\"\")\n values = dict()\n for name, v in self.referenced_vars.items():\n debug_print(\"Evaluating\", name)\n values[name] = v.evaluate()\n\n return values\n\n # semantic actions -------------\n\n def do_declaration(self, var):\n if (var in self.defined_vars):\n raise KconfigDataError('variable \"' + var + '\" defined twice')\n\n self.defined_vars.add(var.name)\n\n # var is a string with the variable's name.\n def do_var(self, var):\n if (var in self.referenced_vars):\n return self.referenced_vars[var]\n\n var_obj = self.referenced_vars[var] = KconfigData.Var(var)\n return var_obj\n\n def do_assignment(self, var, val):\n self.clauses.append(KconfigData.AssignmentClause(var, val))\n\n def do_default(self, var, val, cond=None):\n val = self.value_mangler(val)\n self.clauses.append(KconfigData.DefaultClause(var, val, cond))\n\n def do_depends_on(self, var, expr):\n self.clauses.append(KconfigData.DependsOnClause(var, expr))\n\n def do_select(self, var, symbol, cond=None):\n cond = (cond & var) if cond is not None else var\n self.clauses.append(KconfigData.SelectClause(symbol, cond))\n\n def do_imply(self, var, symbol, cond=None):\n # \"config X imply Y [if COND]\" is the same as\n # \"config Y default y if X [&& COND]\"\n cond = (cond & var) if cond is not None else var\n self.do_default(symbol, True, cond)\n\n# -------------------------------------------\n# KconfigParser implements a recursive descent parser for (simplified)\n# Kconfig syntax.\n# -------------------------------------------\n\n# tokens table\nTOKENS = {}\nTOK_NONE = -1\nTOK_LPAREN = 0; TOKENS[TOK_LPAREN] = '\"(\"';\nTOK_RPAREN = 1; TOKENS[TOK_RPAREN] = '\")\"';\nTOK_EQUAL = 2; TOKENS[TOK_EQUAL] = '\"=\"';\nTOK_AND = 3; TOKENS[TOK_AND] = '\"&&\"';\nTOK_OR = 4; TOKENS[TOK_OR] = '\"||\"';\nTOK_NOT = 5; TOKENS[TOK_NOT] = '\"!\"';\nTOK_DEPENDS = 6; TOKENS[TOK_DEPENDS] = '\"depends\"';\nTOK_ON = 7; TOKENS[TOK_ON] = '\"on\"';\nTOK_SELECT = 8; TOKENS[TOK_SELECT] = '\"select\"';\nTOK_IMPLY = 9; TOKENS[TOK_IMPLY] = '\"imply\"';\nTOK_CONFIG = 10; TOKENS[TOK_CONFIG] = '\"config\"';\nTOK_DEFAULT = 11; TOKENS[TOK_DEFAULT] = '\"default\"';\nTOK_Y = 12; TOKENS[TOK_Y] = '\"y\"';\nTOK_N = 13; TOKENS[TOK_N] = '\"n\"';\nTOK_SOURCE = 14; TOKENS[TOK_SOURCE] = '\"source\"';\nTOK_BOOL = 15; TOKENS[TOK_BOOL] = '\"bool\"';\nTOK_IF = 16; TOKENS[TOK_IF] = '\"if\"';\nTOK_ID = 17; TOKENS[TOK_ID] = 'identifier';\nTOK_EOF = 18; TOKENS[TOK_EOF] = 'end of file';\n\nclass KconfigParserError(Exception):\n def __init__(self, parser, msg, tok=None):\n self.loc = parser.location()\n tok = tok or parser.tok\n if tok != TOK_NONE:\n location = TOKENS.get(tok, None) or ('\"%s\"' % tok)\n msg = '%s before %s' % (msg, location)\n self.msg = msg\n\n def __str__(self):\n return \"%s: %s\" % (self.loc, self.msg)\n\nclass KconfigParser:\n\n @classmethod\n def parse(self, fp, mode=None):\n data = KconfigData(mode or KconfigParser.defconfig)\n parser = KconfigParser(data)\n parser.parse_file(fp)\n return data\n\n def __init__(self, data):\n self.data = data\n\n def parse_file(self, fp):\n self.abs_fname = os.path.abspath(fp.name)\n self.fname = fp.name\n self.data.previously_included.append(self.abs_fname)\n self.src = fp.read()\n if self.src == '' or self.src[-1] != '\\n':\n 
self.src += '\\n'\n self.cursor = 0\n self.line = 1\n self.line_pos = 0\n self.get_token()\n self.parse_config()\n\n def do_assignment(self, var, val):\n if not var.startswith(\"CONFIG_\"):\n raise Error('assigned variable should start with CONFIG_')\n var = self.data.do_var(var[7:])\n self.data.do_assignment(var, val)\n\n # file management -----\n\n def error_path(self):\n inf = self.data.incl_info\n res = \"\"\n while inf:\n res = (\"In file included from %s:%d:\\n\" % (inf['file'],\n inf['line'])) + res\n inf = inf['parent']\n return res\n\n def location(self):\n col = 1\n for ch in self.src[self.line_pos:self.pos]:\n if ch == '\\t':\n col += 8 - ((col - 1) % 8)\n else:\n col += 1\n return '%s%s:%d:%d' %(self.error_path(), self.fname, self.line, col)\n\n def do_include(self, include):\n incl_abs_fname = os.path.join(os.path.dirname(self.abs_fname),\n include)\n # catch inclusion cycle\n inf = self.data.incl_info\n while inf:\n if incl_abs_fname == os.path.abspath(inf['file']):\n raise KconfigParserError(self, \"Inclusion loop for %s\"\n % include)\n inf = inf['parent']\n\n # skip multiple include of the same file\n if incl_abs_fname in self.data.previously_included:\n return\n try:\n fp = open(incl_abs_fname, 'rt', encoding='utf-8')\n except IOError as e:\n raise KconfigParserError(self,\n '%s: %s' % (e.strerror, include))\n\n inf = self.data.incl_info\n self.data.incl_info = { 'file': self.fname, 'line': self.line,\n 'parent': inf }\n KconfigParser(self.data).parse_file(fp)\n self.data.incl_info = inf\n\n # recursive descent parser -----\n\n # y_or_n: Y | N\n def parse_y_or_n(self):\n if self.tok == TOK_Y:\n self.get_token()\n return True\n if self.tok == TOK_N:\n self.get_token()\n return False\n raise KconfigParserError(self, 'Expected \"y\" or \"n\"')\n\n # var: ID\n def parse_var(self):\n if self.tok == TOK_ID:\n val = self.val\n self.get_token()\n return self.data.do_var(val)\n else:\n raise KconfigParserError(self, 'Expected identifier')\n\n # assignment_var: ID (starting with \"CONFIG_\")\n def parse_assignment_var(self):\n if self.tok == TOK_ID:\n val = self.val\n if not val.startswith(\"CONFIG_\"):\n raise KconfigParserError(self,\n 'Expected identifier starting with \"CONFIG_\"', TOK_NONE)\n self.get_token()\n return self.data.do_var(val[7:])\n else:\n raise KconfigParserError(self, 'Expected identifier')\n\n # assignment: var EQUAL y_or_n\n def parse_assignment(self):\n var = self.parse_assignment_var()\n if self.tok != TOK_EQUAL:\n raise KconfigParserError(self, 'Expected \"=\"')\n self.get_token()\n self.data.do_assignment(var, self.parse_y_or_n())\n\n # primary: NOT primary\n # | LPAREN expr RPAREN\n # | var\n def parse_primary(self):\n if self.tok == TOK_NOT:\n self.get_token()\n val = ~self.parse_primary()\n elif self.tok == TOK_LPAREN:\n self.get_token()\n val = self.parse_expr()\n if self.tok != TOK_RPAREN:\n raise KconfigParserError(self, 'Expected \")\"')\n self.get_token()\n elif self.tok == TOK_ID:\n val = self.parse_var()\n else:\n raise KconfigParserError(self, 'Expected \"!\" or \"(\" or identifier')\n return val\n\n # disj: primary (OR primary)*\n def parse_disj(self):\n lhs = self.parse_primary()\n while self.tok == TOK_OR:\n self.get_token()\n lhs = lhs | self.parse_primary()\n return lhs\n\n # expr: disj (AND disj)*\n def parse_expr(self):\n lhs = self.parse_disj()\n while self.tok == TOK_AND:\n self.get_token()\n lhs = lhs & self.parse_disj()\n return lhs\n\n # condition: IF expr\n # | empty\n def parse_condition(self):\n if self.tok == TOK_IF:\n 
self.get_token()\n return self.parse_expr()\n else:\n return None\n\n # property: DEFAULT y_or_n condition\n # | DEPENDS ON expr\n # | SELECT var condition\n # | BOOL\n def parse_property(self, var):\n if self.tok == TOK_DEFAULT:\n self.get_token()\n val = self.parse_y_or_n()\n cond = self.parse_condition()\n self.data.do_default(var, val, cond)\n elif self.tok == TOK_DEPENDS:\n self.get_token()\n if self.tok != TOK_ON:\n raise KconfigParserError(self, 'Expected \"on\"')\n self.get_token()\n self.data.do_depends_on(var, self.parse_expr())\n elif self.tok == TOK_SELECT:\n self.get_token()\n symbol = self.parse_var()\n cond = self.parse_condition()\n self.data.do_select(var, symbol, cond)\n elif self.tok == TOK_IMPLY:\n self.get_token()\n symbol = self.parse_var()\n cond = self.parse_condition()\n self.data.do_imply(var, symbol, cond)\n elif self.tok == TOK_BOOL:\n self.get_token()\n else:\n raise KconfigParserError(self, 'Error in recursive descent?')\n\n # properties: properties property\n # | /* empty */\n def parse_properties(self, var):\n had_default = False\n while self.tok == TOK_DEFAULT or self.tok == TOK_DEPENDS or \\\n self.tok == TOK_SELECT or self.tok == TOK_BOOL or \\\n self.tok == TOK_IMPLY:\n self.parse_property(var)\n\n # for nicer error message\n if self.tok != TOK_SOURCE and self.tok != TOK_CONFIG and \\\n self.tok != TOK_ID and self.tok != TOK_EOF:\n raise KconfigParserError(self, 'expected \"source\", \"config\", identifier, '\n + '\"default\", \"depends on\", \"imply\" or \"select\"')\n\n # declaration: config var properties\n def parse_declaration(self):\n if self.tok == TOK_CONFIG:\n self.get_token()\n var = self.parse_var()\n self.data.do_declaration(var)\n self.parse_properties(var)\n else:\n raise KconfigParserError(self, 'Error in recursive descent?')\n\n # clause: SOURCE\n # | declaration\n # | assignment\n def parse_clause(self):\n if self.tok == TOK_SOURCE:\n val = self.val\n self.get_token()\n self.do_include(val)\n elif self.tok == TOK_CONFIG:\n self.parse_declaration()\n elif self.tok == TOK_ID:\n self.parse_assignment()\n else:\n raise KconfigParserError(self, 'expected \"source\", \"config\" or identifier')\n\n # config: clause+ EOF\n def parse_config(self):\n while self.tok != TOK_EOF:\n self.parse_clause()\n return self.data\n\n # scanner -----\n\n def get_token(self):\n while True:\n self.tok = self.src[self.cursor]\n self.pos = self.cursor\n self.cursor += 1\n\n self.val = None\n self.tok = self.scan_token()\n if self.tok is not None:\n return\n\n def check_keyword(self, rest):\n if not self.src.startswith(rest, self.cursor):\n return False\n length = len(rest)\n if self.src[self.cursor + length].isalnum() or self.src[self.cursor + length] == '_':\n return False\n self.cursor += length\n return True\n\n def scan_token(self):\n if self.tok == '#':\n self.cursor = self.src.find('\\n', self.cursor)\n return None\n elif self.tok == '=':\n return TOK_EQUAL\n elif self.tok == '(':\n return TOK_LPAREN\n elif self.tok == ')':\n return TOK_RPAREN\n elif self.tok == '&' and self.src[self.pos+1] == '&':\n self.cursor += 1\n return TOK_AND\n elif self.tok == '|' and self.src[self.pos+1] == '|':\n self.cursor += 1\n return TOK_OR\n elif self.tok == '!':\n return TOK_NOT\n elif self.tok == 'd' and self.check_keyword(\"epends\"):\n return TOK_DEPENDS\n elif self.tok == 'o' and self.check_keyword(\"n\"):\n return TOK_ON\n elif self.tok == 's' and self.check_keyword(\"elect\"):\n return TOK_SELECT\n elif self.tok == 'i' and self.check_keyword(\"mply\"):\n return 
TOK_IMPLY\n elif self.tok == 'c' and self.check_keyword(\"onfig\"):\n return TOK_CONFIG\n elif self.tok == 'd' and self.check_keyword(\"efault\"):\n return TOK_DEFAULT\n elif self.tok == 'b' and self.check_keyword(\"ool\"):\n return TOK_BOOL\n elif self.tok == 'i' and self.check_keyword(\"f\"):\n return TOK_IF\n elif self.tok == 'y' and self.check_keyword(\"\"):\n return TOK_Y\n elif self.tok == 'n' and self.check_keyword(\"\"):\n return TOK_N\n elif (self.tok == 's' and self.check_keyword(\"ource\")) or \\\n self.tok == 'i' and self.check_keyword(\"nclude\"):\n # source FILENAME\n # include FILENAME\n while self.src[self.cursor].isspace():\n self.cursor += 1\n start = self.cursor\n self.cursor = self.src.find('\\n', self.cursor)\n self.val = self.src[start:self.cursor]\n return TOK_SOURCE\n elif self.tok.isalnum():\n # identifier\n while self.src[self.cursor].isalnum() or self.src[self.cursor] == '_':\n self.cursor += 1\n self.val = self.src[self.pos:self.cursor]\n return TOK_ID\n elif self.tok == '\\n':\n if self.cursor == len(self.src):\n return TOK_EOF\n self.line += 1\n self.line_pos = self.cursor\n elif not self.tok.isspace():\n raise KconfigParserError(self, 'invalid input')\n\n return None\n\nif __name__ == '__main__':\n argv = sys.argv\n mode = defconfig\n if len(sys.argv) > 1:\n if argv[1] == '--defconfig':\n del argv[1]\n elif argv[1] == '--randconfig':\n random.seed()\n mode = randconfig\n del argv[1]\n elif argv[1] == '--allyesconfig':\n mode = allyesconfig\n del argv[1]\n elif argv[1] == '--allnoconfig':\n mode = allnoconfig\n del argv[1]\n\n if len(argv) == 1:\n print (\"%s: at least one argument is required\" % argv[0], file=sys.stderr)\n sys.exit(1)\n\n if argv[1].startswith('-'):\n print (\"%s: invalid option %s\" % (argv[0], argv[1]), file=sys.stderr)\n sys.exit(1)\n\n data = KconfigData(mode)\n parser = KconfigParser(data)\n external_vars = set()\n for arg in argv[3:]:\n m = re.match(r'^(CONFIG_[A-Z0-9_]+)=([yn]?)$', arg)\n if m is not None:\n name, value = m.groups()\n parser.do_assignment(name, value == 'y')\n external_vars.add(name[7:])\n else:\n fp = open(arg, 'rt', encoding='utf-8')\n parser.parse_file(fp)\n fp.close()\n\n config = data.compute_config()\n for key in sorted(config.keys()):\n if key not in external_vars and config[key]:\n print ('CONFIG_%s=y' % key)\n\n deps = open(argv[2], 'wt', encoding='utf-8')\n for fname in data.previously_included:\n print ('%s: %s' % (argv[1], fname), file=deps)\n deps.close()\n","repo_name":"qemu/qemu","sub_path":"scripts/minikconf.py","file_name":"minikconf.py","file_ext":"py","file_size_in_byte":23359,"program_lang":"python","lang":"en","doc_type":"code","stars":8597,"dataset":"github-code","pt":"81"} +{"seq_id":"16135076789","text":"from weather import Weather\nfrom watson_developer_cloud import ConversationV1\nimport json\nimport watson_developer_cloud\nfrom click._compat import raw_input\n\nweather=Weather()\n#print(weather.lookup_by_location(\"Charlotte\").condition())\n\np=weather.lookup_by_location(\"Charlotte\").atmosphere()\nprint(p['humidity'])\n \n\nconversation = watson_developer_cloud.ConversationV1(\n username = '497b8111-3060-4043-9afb-df0e487e4710',\n password = 'gWfKEGCSWUiu',\n version = '2017-05-26'\n)\nprint(\"Start entering your messages\")\n\nresponse = conversation.message(\n workspace_id='727a8a5e-1b66-4009-9a5e-b64bd1505f4c',\n message_input={\n 'text': \"Charlotte\"\n }\n )\nprint 
(response)\n","repo_name":"nitishr12/Weather-Chatbot","sub_path":"test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21803511958","text":"#coding:utf-8\nimport os\nimport pdb\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nFRIEND = 'noteInfor' + os.sep + \"userData\" + os.sep + \"%s\" + os.sep + 'friends.txt'\n\ndef addFriend(request):\n\tID = request.GET.get('userID',None)\n\tfriendID = request.GET.get('friendID',None)\n\tfriendName = getFriendName(friendID)\n\tif checkFriend(ID,friendID):\n\t\treturn True #friend exist\n\twith open(FRIEND%ID,'a') as f:\n\t\tf.write(friendID + ',' + friendName + '\\n')\n\tf.close()\n\treturn True\n\ndef checkFileExist(path):\n\tif os.path.exists(path):\n\t\treturn True\n\treturn False\n\ndef checkFriend(ID,friendID):\n\tif not checkFileExist(FRIEND%ID):\n\t\treturn False #friend not exist\n\n\twith open(FRIEND%ID,'r') as f:\n\t\tfor friendInfor in f.readlines():\n\t\t\tfriendInfor = friendInfor.strip('\\n').split(',')\n\t\t\tif friendInfor[0] == friendID:\n\t\t\t\treturn True #friend exist\n\treturn False\n\ndef getFriendList(request):\n\tfriendList = []\n\tuserID = request.GET.get('userID',None)\n\twith open(FRIEND%userID,'r') as f:\n\t\tfor friend in f.readlines():\n\t\t\tfriend = friend.strip('\\n').split(',')\n\t\t\tif friend:\n\t\t\t\tfriendList.append({'friendID':friend[0],'friendName':friend[1]})\n\treturn friendList\n\ndef getFriendName(friendID):\n\tpath = 'noteInfor' + os.sep + 'account.txt'\n\tfriendName = ''\n\twith open(path,'r') as f:\n\t\tfor line in f.readlines():\n\t\t\tline = line.strip('\\n').split(',')\n\t\t\tif line[0] == friendID:\n\t\t\t\treturn line[2]\n\treturn friendName","repo_name":"pzh263853614/DataStructure","sub_path":"note/friend.py","file_name":"friend.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39376278668","text":"import logging\n\nimport telegram\n\n\nclass TelegramLogsHandler(logging.Handler):\n \"\"\"Custom telegram handler for logging.\"\"\"\n def __init__(self, token: str, chat_id: str) -> None:\n super().__init__()\n self.token = token\n self.chat_id = chat_id\n\n def emit(self, record: logging.LogRecord) -> None:\n tg_bot = telegram.Bot(token=self.token)\n tg_bot.send_message(chat_id=self.chat_id, text=self.format(record))\n","repo_name":"toor09/support_bot","sub_path":"custom_log_handlers.py","file_name":"custom_log_handlers.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10612533781","text":"fade_old_max_wait = 1 # Wait no more than this many seconds to fade out old action\n\nimport kivy\nkivy.require('1.9.0')\n\nfrom kivy.animation import Animation\nfrom kivy.clock import Clock\n\nclass Action:\n def __init__(self, action, old_action, client):\n self.action = action\n self.old_action = old_action\n\n self.client = client\n self.meteor = self.client.meteor\n self.time = self.client.time\n \n self.layer = self.action['layer']\n \n self.settings = self.combine_settings(self.client.defaults, self.action.get('settings'))\n self.args = action.get('args', {})\n \n self.fade_length = None\n \n self.ready = False\n self.shown = False\n self.removed = False\n \n self.anim_widgets = []\n self.anims_ended = 0\n \n self.show_schedule_handle = None\n \n 
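# Note: each entry in anim_widgets is a (widget, property, fade-in value, fade-out value) tuple; do_in_animation and do_out_animation below replay these tuples with kivy Animations.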
def add_anim_widget(self, widget, prop, vin, vout):\n self.anim_widgets.append((widget, prop, vin, vout))\n \n def do_in_animation(self, duration):\n for widget, prop, vin, vout in self.anim_widgets:\n Animation.cancel_all(widget, prop)\n\n kwargs = {'transition': 'out_quad', 'duration': duration}\n kwargs[prop] = vin\n \n Animation(**kwargs).start(widget)\n \n def do_out_animation(self, duration):\n for widget, prop, vin, vout in self.anim_widgets:\n Animation.cancel_all(widget, prop)\n\n kwargs = {'transition': 'in_quad', 'duration': duration}\n kwargs[prop] = vout\n \n anim = Animation(**kwargs)\n anim.on_complete = self._out_animation_end\n anim.start(widget)\n \n def _out_animation_end(self, widget):\n self.anims_ended += 1\n\n if self.anims_ended >= len(self.anim_widgets):\n self.out_animation_end()\n \n def out_animation_end(self):\n pass\n \n def combine_settings(self, *args):\n result = {}\n for arg in args:\n if type(arg) == dict:\n for k, v in arg.items():\n if not type(v) == type(None):\n result[k] = v\n \n return result\n \n def get_current_widget_index(self):\n return\n \n def check_ready(self):\n return True\n \n def get_fade_duration(self):\n if self.fade_length == None:\n if self.old_action and self.old_action.fade_length:\n return self.old_action.fade_length or 0\n else: return 0\n\n else:\n return self.fade_length\n \n def remove_old(self):\n if self.old_action: \n self.old_action.hide(self.get_fade_duration())\n self.old_action.remove()\n self.old_action = None\n \n def show(self):\n self.show_schedule_handle = None\n\n self.ready = self.check_ready()\n\n if self.ready:\n self.shown = True\n \n self.remove_old()\n self.on_show(self.get_fade_duration())\n \n else:\n if self.old_action and self.time.now() - self.action['time'] > fade_old_max_wait:\n self.remove_old()\n \n self.show_schedule_handle = Clock.schedule_once(lambda dt: self.show(), 0)\n \n def on_show(self, duration):\n pass\n \n def hide(self, duration = None):\n if self.show_schedule_handle: self.show_schedule_handle.cancel()\n self.remove_old()\n \n if duration == None: duration = self.get_fade_duration()\n self.on_hide(duration)\n \n def on_hide(self, duration):\n self.shown = False\n \n def remove(self):\n if self.shown:\n Clock.schedule_once(lambda dt: self.remove(), 0)\n \n else:\n self.removed = True\n","repo_name":"cedarproject/displayminion","sub_path":"displayminion/Action.py","file_name":"Action.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"38583379740","text":"# import libraries\nimport sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n '''\n Loads dataset and returns dataframe\n\n Args:\n messages_filepath (str): messages .csv dataset filepath\n categories_filepath (str) categories .csv dataset filepath\n\n Returns:\n df (pd.dataframe): messages and categories merged pandas dataframe \n '''\n\n messages = pd.read_csv('./'+ messages_filepath)\n categories = pd.read_csv('./'+ categories_filepath)\n df = messages.merge(categories, on='id')\n\n return df\n\ndef clean_categories_data(categories_df): \n '''\n Cleans categories columns and data types\n\n Args:\n categories_df (pd.dataframe): categories dataframe\n\n Returns:\n categories (pd.dataframe): cleaned categories pandas dataframe \n '''\n\n categories = categories_df.str.split(';',expand=True)\n \n # select the first row of the categories dataframe\n row = 
categories.iloc[0]\n category_colnames = [col.split('-')[0] for col in row]\n categories.columns = category_colnames\n\n # set each value to be the last character of the string\n # convert column from string to numeric\n for column in categories:\n categories[column] = categories[column].str[-1:].astype(int)\n \n return categories\n\ndef clean_data(df):\n '''\n Cleans dataframe and removed duplicates \n\n Args:\n df (pd.dataframe): merged dataframe\n\n Returns:\n df (pd.dataframe): cleaned merged pandas dataframe \n '''\n\n # clean categories data and concat with main dataset: \n categories = clean_categories_data(df.categories)\n df.drop(['categories'], axis=1, inplace=True)\n df = pd.concat((df, categories), axis=1)\n \n\n # check for duplicates: \n print('Number of duplicates before cleaning...\\n {}'\n .format(df[df.duplicated(subset=None, keep=False)].count()[0]))\n\n # drop duplicates\n df.drop_duplicates(subset =None, keep =False, inplace =True)\n\n print('Number of duplicates after cleaning...\\n {}'\n .format(df[df.duplicated(subset=None, keep=False)].count()[0]))\n return df\n\n\ndef save_data(df, database_filename):\n '''\n Saves dataframe to sqlite database\n\n Args:\n df (pd.dataframe): merged and cleaned dataframe\n database_filename (str): sqlite database filename\n\n Returns:\n no returns\n '''\n\n # create sqlite database engine\n engine = create_engine('sqlite:///'+ database_filename)\n\n # save dataframe to sqllite database and replace if exists\n df.to_sql('Messages', engine, index=False, if_exists='replace')\n print ('Sample of saved data...') \n print (engine.execute(\"SELECT * FROM Messages\").fetchall()[0])\n\ndef main():\n if len(sys.argv) == 4:\n\n # get input from args: \n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n # load data: \n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n # clean data: \n print('Cleaning data...')\n df = clean_data(df)\n \n # save data:\n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()","repo_name":"rawanm/DataScientistNanodegree","sub_path":"P5_DisasterResponsePipelines/data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"12239698466","text":"import os\n#from PIL import Image\nimport pathlib\nimport random\n\n# data is image or string, word is what it means\n# directory is where we want to save\n# dataType is boolean, 0 for pic, 1 for txt\ndef save(dataType, data, word, directory):\n # database folder - all folders for category (anatomy, mandarin, etc)\n if not os.path.exists(\"Databases\"):\n os.mkdir(\"Databases\")\n if not os.path.exists(\"Databases/\" + directory):\n directory = \"Databases/\" + directory\n os.mkdir(directory)\n else:\n directory = \"Databases/\" + directory\n # make 2 folders in directory if not already there\n # definition folder and data folder\n if not os.path.exists(directory + \"/Data\"):\n os.mkdir(directory + \"/Data\")\n # if data folder doesn't exist, set number = 0\n number = \"0\"\n # if data folder already exists, get most recent number\n else:\n numFile = open(directory + \"/number.txt\",\"r\")\n number = numFile.read()\n numFile.close();\n intNumber = int(number) + 1\n number = str(intNumber)\n if not os.path.exists(directory + \"/Word\"):\n os.mkdir(directory + \"/Word\")\n # inside data, pic folder and data folder\n if not os.path.exists(directory + \"/Data/Pic\"):\n os.mkdir(directory + \"/Data/Pic\")\n if not os.path.exists(directory + \"/Data/Text\"):\n os.mkdir(directory + \"/Data/Text\")\n # check if data is text or pic\n # save in diff folders\n if dataType == \"0\": # pic\n # if pic save as jpg\n # NUM_pic.jpg\n data.save(directory + \"/Data/Pic/\" + number + \"_pic\", \"PNG\")\n elif dataType == \"1\": # text\n # if text save as txt file\n # txtNUM.jpg\n file = open(directory + \"/Data/Text/\" + number + \"_text.txt\", \"w\")\n file.write(data)\n file.close()\n else:\n print (\"You did not give a valid dataType. 
0 = pic, 1 = string\")\n \n # in data folder, save word as number_(0/1).txt\n numFile = open(directory + \"/Word/\" + number + \"_\" + dataType + \".txt\", \"w\")\n numFile.write(word)\n numFile.close()\n # save last number\n file = open(directory + \"/number.txt\", \"w\")\n file.write(number)\n file.close()\n\n# only returns file locations of word, data and dataType\ndef get(directory):\n # find word in Word/\n # split at _ and find number in 1st number\n # find dataType\n # return word file and corresponding file\n file = open(directory + \"/number.txt\",\"r\")\n maxNum = file.read()\n maxNum = int(maxNum)\n num = random.randint(0, maxNum)\n num = str(num)\n #directory = \"Databases/\" + directory\n # find out if word is connected to pic or text\n \n if os.path.isfile(directory + \"/Word/\" + num + \"_0.txt\"): # pic\n fileWord = open(directory + \"/Word/\" + num + \"_0.txt\",\"r\")\n word = fileWord.read()\n fileWord.close()\n fileData = open(directory + \"/Data/Pic/\" + num + \"_pic.png\")\n data = fileData.read()\n fileData.close()\n # return word location and data location\n array = [word,data]\n return (array)\n elif os.path.isfile(directory + \"/Word/\" + num + \"_1.txt\"): # text\n fileWord = open(directory + \"/Word/\" + num + \"_1.txt\",\"r\")\n word = fileWord.read()\n fileWord.close()\n fileData = open(directory + \"/Data/Text/\" + num + \"_text.txt\")\n data = fileData.read()\n fileData.close()\n # return word location and data location\n array = [word,data]\n return (array) \n else:\n print (\"You screwed up\")\nglobal category\ncategory = \"Anatomy\"\ndef call():\n dataType = \"1\" # text\n data = \"Deltoid, subscapularis\"\n word = \"Shoulder abductors\"\n directory = \"Anatomy\"\n save(dataType, data, word, directory)\n directory = \"Databases/\" + category\n print(directory)\n if os.path.isdir(directory):\n array = get(directory)\n return array\n #print(\"Word at \" + array[0])\n #print(\"Data at \" + array[1])\n\ndef getters(temp):\n category = temp \n#call(\"Anatomy\")\n \n","repo_name":"TaraCarette/Cue-Cards","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"401393216","text":"\r\n\r\ndef move_zeros(array):\r\n l = array.count('0') + array.count('0.0')\r\n lst=[]\r\n for i in array:\r\n if type(i)==str:\r\n lst.append(i)\r\n lst.append(str(i))\r\n n = lst.count('0')+ lst.count('0.0')-l\r\n n=n-l\r\n lst1 = []\r\n for i in array:\r\n if type(i)==str:\r\n lst1.append(i)\r\n if type(i)!=str and str(i) != '0' and str(i)!='0.0':\r\n lst1.append(i)\r\n for j in range(n):\r\n lst1.append(0)\r\n return lst1\r\n\r\n\r\nprint(move_zeros([False,1,0,1,2,0,1,3,\"a\"]))\r\nprint(move_zeros([9,0.0,0,9,1,2,0,1,0,1,0.0,3,0,1,9,0,0,0,0,9]))\r\nprint(move_zeros([\"a\",0,0,'0',\"b\",\"c\",\"d\",0,1,0,1,0,3,0,1,9,0,0,0,0,9]))\r\nprint(move_zeros([]))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"EmuRaha/MyPythonCodes","sub_path":"kata27.py","file_name":"kata27.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27113434763","text":"def solution(word):\n dic = {'A':0, 'E':1, 'I':2, 'O':3, 'U':4}\n \n t = [781, 156, 31, 6, 1]\n \n answer = 0\n \n for i, w in enumerate(word):\n answer += dic[w] * t[i]\n \n \n return answer + 
len(word)","repo_name":"HaneulJung/Programmers","sub_path":"Programmers/Lv. 2/모음사전.py","file_name":"모음사전.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40374548344","text":"import sys\nimport signal\nimport command\nfrom smartcard.Exceptions import NoCardException, CardConnectionException\nfrom smartcard.System import readers\nfrom smartcard.util import toHexString, toBytes\nfrom PySide2 import QtCore, QtGui, QtWidgets\nfrom ui.main import Ui_MainWindow\n\n\nclass MifareTools(Ui_MainWindow, QtWidgets.QMainWindow):\n\n def __init__(self, parent=None):\n super(MifareTools, self).__init__(parent)\n self.setupUi(self)\n self.setWindowTitle(\"Mifare Tools\")\n self.register_signal()\n\n # attribute registration\n self.is_picc_connect = False\n self.current_connection = None\n self.reader_model = QtGui.QStandardItemModel()\n\n # finally we render the user interface\n self.init_ui()\n\n def __del__(self):\n if self.is_picc_connect:\n self.disconnect_picc()\n\n # -------------------------------------------------------------------------\n # ****************** All functions shown to user is here ******************\n # -------------------------------------------------------------------------\n def init_ui(self):\n # set combobox\n self.cmbReader.setModel(self.reader_model)\n self.reload_readers()\n\n # -------------------------------------------------------------------------\n # *************** All signals must register on this section ***************\n # -------------------------------------------------------------------------\n def register_signal(self):\n # clicked signal\n self.btnReloadReader.clicked.connect(self.on_click)\n self.btnConnetPICC.clicked.connect(self.on_click)\n self.btnAuthKeyA.clicked.connect(self.on_click)\n self.btnAuthKeyB.clicked.connect(self.on_click)\n self.btnFactoryKeyA.clicked.connect(self.on_click)\n self.btnFactoryKeyB.clicked.connect(self.on_click)\n self.btnReadBlock.clicked.connect(self.on_click)\n self.btnWriteBlock.clicked.connect(self.on_click)\n self.cbASCII.clicked.connect(self.on_click)\n self.btnClearLog.clicked.connect(self.on_click)\n # signal when key a/b editing\n for i in range(0, 6):\n # key a\n attr_key_a = getattr(self, 'txtKeyA_%d' % i)\n attr_key_a.textEdited.connect(self.on_text_edited)\n attr_key_a.installEventFilter(self)\n # key b\n attr_key_b = getattr(self, 'txtKeyB_%d' % i)\n attr_key_b.textEdited.connect(self.on_text_edited)\n attr_key_b.installEventFilter(self)\n # signal when block text editing\n for i in range(0, 16):\n attr = getattr(self, 'txtBlock_%d' % i)\n attr.installEventFilter(self)\n attr.textEdited.connect(self.on_text_edited)\n # comoBox changed index\n self.cmbReader.currentIndexChanged\\\n .connect(self.on_combobox_index_changed)\n\n # -------------------------------------------------------------------------\n # ************************ Signal callback is here ************************\n # -------------------------------------------------------------------------\n def eventFilter(self, source, event):\n if (event.type() == QtCore.QEvent.KeyPress):\n if event.key() == QtCore.Qt.Key_Backspace:\n sender = source.objectName()\n prefix, index = sender.split('_')\n if prefix in [\"txtKeyA\", \"txtKeyB\", \"txtBlock\"]:\n if (len(source.text()) == 0):\n curr_index = int(index)\n prev_index = curr_index - 1\n if prev_index >= 0:\n prev_attr = getattr(self, '%s_%d' % (prefix, prev_index))\n prev_attr.setText(\"\")\n 
prev_attr.setFocus()\n return super(MifareTools, self).eventFilter(source, event)\n\n def on_combobox_index_changed(self):\n sender = self.sender().objectName()\n if sender == self.cmbReader.objectName():\n cmb_idx = self.cmbReader.currentIndex()\n reader_obj = self.reader_model.item(cmb_idx)\n if hasattr(reader_obj, 'data'):\n self.current_connection = reader_obj.data().createConnection()\n self.groupBoxPICCC.setEnabled(True)\n self.write_statusbar(\"Reader connected\")\n else:\n self.groupBoxPICCC.setEnabled(False)\n self.write_statusbar(\"Reader not connected\", \"blue\")\n return\n\n def on_click(self):\n sender = self.sender().objectName()\n if sender == self.btnReloadReader.objectName():\n self.reload_readers()\n elif sender == self.btnConnetPICC.objectName():\n if not self.is_picc_connect:\n self.connect_picc()\n else:\n self.disconnect_picc()\n elif sender == self.btnAuthKeyA.objectName():\n cmd = command.LOAD_AUTH\n for i in range(0, 6):\n cmd += \" %s\" % getattr(self, 'txtKeyA_%d' % i).text()\n # load auth\n status, response = self.transmit(cmd)\n if status != \"90 00\":\n return\n # auth block\n sector = self.spnSector.value()\n block = self.spnBlock.value()\n cmd = command.get_block_auth_cmd(sector, block, command.KEY_TYPE_A)\n self.transmit(cmd)\n elif sender == self.btnAuthKeyB.objectName():\n cmd = command.LOAD_AUTH\n for i in range(0, 6):\n cmd += \" %s\" % getattr(self, 'txtKeyB_%d' % i).text()\n # load auth\n status, response = self.transmit(cmd)\n if status != \"90 00\":\n return\n # auth block\n sector = self.spnSector.value()\n block = self.spnBlock.value()\n cmd = command.get_block_auth_cmd(sector, block, command.KEY_TYPE_B)\n self.transmit(cmd)\n elif sender == self.btnFactoryKeyA.objectName():\n for i in range(0, 6):\n getattr(self, 'txtKeyA_%d' % i).setText(\"FF\")\n elif sender == self.btnFactoryKeyB.objectName():\n for i in range(0, 6):\n getattr(self, 'txtKeyB_%d' % i).setText(\"FF\")\n elif sender == self.btnReadBlock.objectName():\n self.reset_block()\n sector = self.spnSector.value()\n block = self.spnBlock.value()\n cmd = command.read_block_cmd(sector, block)\n status, response = self.transmit(cmd)\n if status != \"90 00\":\n return\n i = 0\n for val in response.split():\n if self.cbASCII.isChecked():\n val = self.get_ascii_value(val)\n getattr(self, 'txtBlock_%d' % i).setText(val)\n i += 1\n elif sender == self.btnWriteBlock.objectName():\n sector = self.spnSector.value()\n block = self.spnBlock.value()\n cmd = command.write_block_cmd(sector, block)\n for i in range(0, 16):\n attr = getattr(self, \"txtBlock_%d\" % i)\n val = attr.text()\n if self.cbASCII.isChecked():\n val = self.get_hexa_value(val)\n cmd += \" %s\" % val\n self.transmit(cmd)\n elif sender == self.cbASCII.objectName():\n # import bytes\n for i in range(0, 16):\n attr = getattr(self, 'txtBlock_%d' % i)\n val = attr.text()\n if self.cbASCII.isChecked():\n attr.setMaxLength(1)\n attr.setText(self.get_ascii_value(val))\n else:\n attr.setMaxLength(2)\n attr.setText(self.get_hexa_value(val))\n elif sender == self.btnClearLog.objectName():\n self.txtAPDULog.clear()\n\n def on_text_edited(self, text):\n sender = self.sender().objectName()\n curr_attr = getattr(self, sender)\n prefix, index = sender.split('_')\n max_length = 0\n if prefix in [\"txtKeyA\", \"txtKeyB\"]:\n max_length = 6\n if prefix == \"txtBlock\":\n max_length = 16\n curr_index = int(index)\n next_index = curr_index + 1\n next_attr = None\n if next_index < max_length:\n next_attr = getattr(self, '%s_%d' % (prefix, next_index))\n\n 
if text:\n if self.cbASCII.isChecked() and prefix == 'txtBlock':\n if next_attr:\n # go to next field\n next_attr.setText(\"\")\n next_attr.setFocus()\n else:\n text = text.upper()\n curr_attr.setText(text)\n if next_attr and len(text) == 2:\n try:\n # go to next field\n self.get_ascii_value(text)\n next_attr.setText(\"\")\n next_attr.setFocus()\n except ValueError:\n curr_attr.setText(\"\")\n\n # -------------------------------------------------------------------------\n # ************************ Helper function is here ************************\n # -------------------------------------------------------------------------\n def connect_picc(self):\n try:\n self.current_connection.connect()\n status, uid = self.transmit(command.UID)\n if status != \"90 00\":\n return\n atr = toHexString(self.current_connection.getATR())\n self.txtUID.setText(uid)\n self.txtATR.setText(atr)\n self.btnConnetPICC.setText(\"Disconnect\")\n self.tabMain.setEnabled(True)\n self.is_picc_connect = True\n self.write_statusbar(\"Card connected\")\n except NoCardException as e:\n self.write_statusbar(str(e), \"red\")\n except CardConnectionException as e:\n self.reload_readers()\n self.write_statusbar(str(e), \"red\")\n\n def disconnect_picc(self):\n try:\n self.current_connection.disconnect()\n except CardConnectionException:\n pass\n\n uid = \"\"\n atr = \"\"\n self.txtUID.setText(uid)\n self.txtATR.setText(atr)\n self.btnConnetPICC.setText(\"Connect\")\n self.tabMain.setEnabled(False)\n self.is_picc_connect = False\n self.write_statusbar(\"Card disconnected\", \"blue\")\n self.reset_block()\n\n def get_ascii_value(self, hex_str):\n if not hex_str or hex_str == \"00\":\n return \"\"\n ascii = chr(int(hex_str, 16))\n return ascii\n\n def get_hexa_value(self, char):\n if not char:\n return \"00\"\n hexa = \"%x\" % ord(char)\n return hexa.upper()\n\n def reload_readers(self):\n self.cmbReader.clear()\n for reader in readers():\n reader_item = QtGui.QStandardItem(reader.name)\n reader_item.setData(reader)\n self.reader_model.appendRow(reader_item)\n\n def reset_block(self):\n for i in range(0, 16):\n attr = getattr(self, 'txtBlock_%d' % i)\n attr.setText(\"\")\n\n def transmit(self, cmd):\n status_code = response = None\n try:\n data, sw1, sw2 = self.current_connection.transmit(toBytes(cmd))\n status_code = toHexString([sw1, sw2])\n response = toHexString(data)\n\n # write status to apdu log\n apdu_request = \">> %s\\n\" % cmd\n apdu_response = \"<< (%s) %s\\n\" % (status_code, response)\n self.txtAPDULog.insertPlainText(apdu_request)\n self.txtAPDULog.insertPlainText(apdu_response)\n self.txtAPDULog.insertPlainText(\"\\n\")\n\n # write status to statusbar\n status_color = \"green\" if status_code == '90 00' else \"red\"\n self.write_statusbar(status_code, status_color)\n except CardConnectionException:\n self.disconnect_picc()\n\n return status_code, response\n\n def write_statusbar(self, message, color='green'):\n self.statusbar.setStyleSheet(\"color: %s;\" % color)\n self.statusbar.showMessage(message)\n\n\ndef main():\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n app = QtWidgets.QApplication(sys.argv)\n\n # ui main\n window = MifareTools()\n window.show()\n\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fananimi/mifare-tools","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12457,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"12396638530","text":"import scrapy\r\nfrom read_files import 
read_csv, read_excel\r\nbase_url = 'https://stackoverflow.com/questions/tagged/{}'\r\nclass SoSpider(scrapy.Spider):\r\n name = 'so'\r\n def start_requests(self):\r\n for tag in read_excel():\r\n yield scrapy.Request(base_url.format(tag))\r\n\r\n def parse(self, response):\r\n questions = response.xpath('normalize-space(//*[@id=\"mainbar\"]/div[4]/div/div[1]/text())').get()\r\n questions = questions.strip('questions')\r\n yield {\r\n 'questions': questions,\r\n 'url': response.url\r\n }\r\n","repo_name":"mrelbaek/course-scraper","sub_path":"myfirstscript.py","file_name":"myfirstscript.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24708047608","text":"from spack.pkg.k4.key4hep_stack import Ilcsoftpackage\n\n\nclass Kitrack(CMakePackage, Ilcsoftpackage):\n \"\"\"Toolkit for Tracking. Consists of KiTrack (Cellular Automaton, a Hopfield Neural Network, the hit and track classes) and Criteria (the criteria classes).\"\"\"\n\n url = \"https://github.com/iLCSoft/KiTrack/archive/v01-10.tar.gz\"\n homepage = \"https://github.com/iLCSoft/KiTrack\"\n git = \"https://github.com/iLCSoft/KiTrack.git\"\n\n maintainers = [\"vvolkl\"]\n\n version(\"master\", branch=\"master\")\n version(\n \"1.10\",\n sha256=\"e89e0553ba76946749e422aa470bbe20456b085efe523fb42f97565201376870\",\n )\n\n depends_on(\"ilcutil\")\n depends_on(\"marlin\")\n depends_on(\"root\")\n\n def cmake_args(self):\n args = []\n args.append(\n \"-DCMAKE_CXX_STANDARD=%s\" % self.spec[\"root\"].variants[\"cxxstd\"].value\n )\n return args\n","repo_name":"key4hep/key4hep-spack","sub_path":"packages/kitrack/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"5933041884","text":"import struct\n\ndef left_rotate(number, bits):\n result = ( (number << bits) | ( number >> (32 - bits) ) ) & 0xffffffff\n return result\n\ndef sha1(message):\n\n h0 = 0x67452301\n h1 = 0xEFCDAB89\n h2 = 0x98BADCFE\n h3 = 0x10325476\n h4 = 0xC3D2E1F0\n\n message_len_in_bytes = len(message)\n message_len_in_bits = message_len_in_bytes * 8\n\n #Padding:\n #Append a '1' bit\n message += b'\\x80'\n #Append '0' bits until message length is 64 bits less than a multiple of 512 bits\n message += b'\\x00' * ( ( 56 - (message_len_in_bytes + 1) % 64) % 64 )\n #Append 64 bits of length of string\n message += struct.pack(b'>Q', message_len_in_bits)\n\n #Message is hashed in pieces of 512 bits = 64 bytes\n for i in range(0, len(message), 64):\n\n w = [0] * 80\n\n for j in range(16):\n w[j] = struct.unpack(b'>I', message[i + j*4:i + j*4 + 4])[0]\n for j in range(16, 80):\n w[j] = left_rotate(w[j - 3] ^ w[j - 8] ^ w[j - 14] ^ w[j - 16], 1)\n\n a = h0\n b = h1\n c = h2\n d = h3\n e = h4\n\n for j in range(80):\n if ( 0 <= j <= 19 ):\n f = d ^ (b & (c ^ d))\n k = 0x5A827999\n elif ( 20 <= j <= 39 ):\n f = b ^ c ^ d\n k = 0x6ED9EBA1\n elif ( 40 <= j <= 59 ):\n f = (b & c) | (b & d) | (c & d)\n k = 0x8F1BBCDC\n elif ( 60 <= j <= 79 ):\n f = b ^ c ^ d\n k = 0xCA62C1D6\n\n a, b, c, d, e = ((left_rotate(a, 5) + f + e + k + w[j]) & 0xffffffff, a, left_rotate(b, 30), c, d)\n\n h0 = (h0 + a) & 0xffffffff\n h1 = (h1 + b) & 0xffffffff\n h2 = (h2 + c) & 0xffffffff\n h3 = (h3 + d) & 0xffffffff\n h4 = (h4 + e) & 0xffffffff\n\n return '%08x%08x%08x%08x%08x' % (h0, h1, h2, h3, 
h4)\n","repo_name":"sriram-rao/NTC_Project","sub_path":"sha1.py","file_name":"sha1.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74933567945","text":"import torch\nimport gluonnlp as nlp\nimport numpy as np\n\nfrom torch.utils.data import Dataset, DataLoader\nfrom KoBERTModel.BERTDataset import BERTDataset\nfrom KoBERTModel.BERTClassifier import BERTClassifier\nfrom kobert.utils.utils import get_tokenizer\nfrom kobert.pytorch_kobert import get_pytorch_kobert_model\n\n\nmodel = None\nbertmodel, vocab = get_pytorch_kobert_model() # calling the bert model and the vocabulary\ndevice = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\ndef load_model():\n global model\n model = BERTClassifier(bertmodel, dr_rate=0.4).to(device)\n\n model.load_state_dict(torch.load('KoBERTModel/model/train.pt'), strict = False)\n model.eval() \n\ndef load_dataset(predict_sentence):\n tokenizer = get_tokenizer()\n tok = nlp.data.BERTSPTokenizer(tokenizer, vocab, lower=False)\n\n data = [predict_sentence, '0']\n dataset_another = [data]\n another_test = BERTDataset(dataset_another, 0, 1, tok, max_len=64, pad=True, pair=False)\n return DataLoader(another_test, batch_size = 32, num_workers = 5) # torch 형식 변환\n\ndef inference(predict_sentence): # input = 보이스피싱 탐지하고자 하는 sentence\n print(\"※ KoBERT 추론 시작 ※\")\n\n test_dataloader = load_dataset(predict_sentence)\n \n for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(test_dataloader):\n token_ids = token_ids.long().to(device)\n segment_ids = segment_ids.long().to(device)\n\n valid_length = valid_length\n label = label.long().to(device)\n\n out = model(token_ids, valid_length, segment_ids)\n\n result = False\n test_eval = []\n for i in out:\n logits = i\n logits = logits.detach().cpu().numpy()\n\n if np.argmax(logits) == 0:\n test_eval.append(\"일반 음성 전화\")\n elif np.argmax(logits) == 1:\n test_eval.append(\"보이스피싱 전화\")\n result = True\n\n print(\"▶ 입력하신 내용은 '\" + test_eval[0] + \"' 입니다.\")\n return result\n\ndef run(text):\n load_model()\n return inference(text)","repo_name":"Voice-Phishing-Detection-App/ML","sub_path":"KoBERTModel/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14823752984","text":"from UiLayer.WindowLayer.window_layer import Window\nfrom Node.text_edit import LineEditWithBg\nfrom Node.button import ButtonClassicRed\nfrom Node.label import Label\nfrom Common.socket_id import *\nfrom Common.constants import *\nfrom Node.object_block import SkillBlock\n\n\nclass BattleSkill(Window):\n def __init__(self):\n super(BattleSkill, self).__init__()\n self.enable = False\n self.window_title = '战斗技能栏'\n self.has_title_ui = False\n self.width, self.height = 190, 308\n self.config_file = winconfig_dir + 'BattleSkill.csv'\n self.skills = [] # 战斗中的主动技能\n self.setup()\n self.setup_win_config()\n\n def setup(self):\n super().setup()\n\n def setup_skills(self, skills:list):\n self.skills = skills\n for i, skill in enumerate(self.skills):\n self.child('skill_block_'+str(i)).setup(skill)\n self.child('skill_block_' + str(i)).left_click_callback = game.battle_scene.on_battle_skill_left_click\n\n def setup_win_config(self, file=None, given_node=None):\n super().setup_win_config()\n _x, _y = 27, 28\n _w, _h = 88, 42\n index = 0\n for i in range(5):\n for j in range(2):\n skill 
= SkillBlock()\n skill.x, skill.y = _x, _y\n self.add_child('skill_block_' + str(index), skill)\n _x += _w\n index += 1\n _x = 27\n _y += _h","repo_name":"1992leiting/pygame_nt","sub_path":"client/UiLayer/WindowLayer/battle_skill.py","file_name":"battle_skill.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8475853238","text":"from PyQt5.QtCore import Qt, QPoint, QRectF\nfrom PyQt5.QtGui import QPainter, QTransform\nfrom PyQt5.QtWidgets import QGraphicsView, QGraphicsScene\n\nfrom src.view.display_sprite_object import DisplaySpriteObject\nfrom src.model.resources_cache import ResourcesCache\nimport src.helpers.utils as utils\n\n\nclass Display(QGraphicsView):\n def __init__(self):\n\n super(Display, self).__init__()\n\n self.setScene(QGraphicsScene())\n\n self._spriteObject = DisplaySpriteObject()\n\n self.scene().addItem(self._spriteObject)\n\n self._backgroundColor = None\n\n self._backLightOn = True\n\n self._lightBackgroundPixmap = ResourcesCache.get(\"CheckerTileLight\")\n\n self._darkBackgroundPixmap = ResourcesCache.get(\"CheckerTileDark\")\n\n self._lastFocusPoint = QPoint()\n\n self._fitInView = False\n\n self._panning = False\n\n self._leftMousePressed = False\n\n self._spacePressed = False\n\n self._dragPos = QPoint()\n\n self._storedTransform = QTransform()\n\n self.setTransformationAnchor(QGraphicsView.AnchorViewCenter)\n\n self.setResizeAnchor(QGraphicsView.AnchorViewCenter)\n\n self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n self.setRenderHint(QPainter.Antialiasing, False)\n\n self.setOptimizationFlag(QGraphicsView.DontAdjustForAntialiasing)\n\n self.setMouseTracking(True)\n\n self.setStyleSheet(\"border: 0px;\")\n\n\n @property\n def is_panning(self):\n return self._panning\n\n @property\n def zoom(self):\n return self.transform().m11()\n\n @property\n def backlight_enabled(self):\n return self._backLightOn\n\n @backlight_enabled.setter\n def backlight_enabled(self, value):\n self._backLightOn = value\n\n if self._backLightOn:\n self._spriteObject.background_pixmap = self._lightBackgroundPixmap\n else:\n self._spriteObject.background_pixmap = self._darkBackgroundPixmap\n\n self.update()\n\n @property\n def onion_skin_enabled(self):\n return self.sprite_object.enable_onion_skin\n\n @onion_skin_enabled.setter\n def onion_skin_enabled(self, value):\n\n if self._spriteObject.enable_onion_skin != value:\n\n self._spriteObject.enable_onion_skin = value\n self.update()\n\n def is_fit_in_view(self):\n return self._fitInView\n\n def reset_view(self):\n\n self.resetTransform()\n\n def toggle_view(self):\n\n if not self.transform().isIdentity():\n\n self._storedTransform = self.transform()\n self.resetTransform()\n\n else:\n\n self.setTransform(self._storedTransform)\n\n def set_fit_in_view(self, fit):\n\n if self._fitInView != fit:\n self._fitInView = fit\n\n self.resetTransform()\n\n if self._fitInView:\n # Calculate scale factor to cover view increasing the scale by multiples of 2.0\n # to keep pixel perfectness\n\n scale_factor_x = self.width() / self.sceneRect().width()\n scale_factor_y = self.height() / self.sceneRect().height()\n\n scale_factor = max(scale_factor_x, scale_factor_y)\n\n scale_factor = utils.snap_ceil(scale_factor, 2.0)\n\n self.scale(scale_factor, scale_factor)\n\n def toggle_fit_in_view(self):\n\n self.set_fit_in_view(not self._fitInView)\n\n def zoom_to(self, scale_target):\n\n 
scale_factor = scale_target / self.zoom\n\n self.scale(scale_factor, scale_factor)\n\n def zoom_by(self, scale_factor):\n\n self._fitInView = False\n\n self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)\n self.scale(scale_factor, scale_factor)\n self.setTransformationAnchor(QGraphicsView.AnchorViewCenter)\n\n def unload_sprite(self):\n\n self.reset_view()\n\n if not self._spriteObject.is_empty:\n self._spriteObject.unload_sprite()\n\n self.scene().update()\n\n def update_viewport(self):\n\n self._spriteObject.update_bounding_rect()\n\n w = self._spriteObject.sprite.width\n h = self._spriteObject.sprite.height\n\n self.setSceneRect(-w / 2, -h / 2, w, h)\n\n self.scene().update()\n\n def resizeEvent(self, e):\n\n w = self._spriteObject.sprite.width\n h = self._spriteObject.sprite.height\n\n self.setSceneRect(-w / 2, -h / 2, w, h)\n\n if not self._fitInView:\n\n self.centerOn(self._lastFocusPoint)\n\n else:\n self.centerOn(0, 0)\n\n def enterEvent(self, e):\n\n self.setFocus()\n\n def leaveEvent(self, e):\n\n self.clearFocus()\n\n def mousePressEvent(self, e):\n\n if e.button() == Qt.MiddleButton:\n\n self.setCursor(Qt.ClosedHandCursor)\n self._panning = True\n self._dragPos = e.pos()\n e.accept()\n return\n\n elif e.button() == Qt.LeftButton:\n\n self._leftMousePressed = True\n\n if self._spacePressed:\n self._panning = True\n self.setCursor(Qt.ClosedHandCursor)\n self._dragPos = e.pos()\n e.accept()\n return\n\n super(Display, self).mousePressEvent(e)\n\n def mouseReleaseEvent(self, e):\n\n print('DISPLAY MOUSE RELEASE')\n\n if self._panning and e.button() == Qt.MiddleButton:\n\n self.setCursor(Qt.ArrowCursor)\n self._panning = False\n\n elif e.button() == Qt.LeftButton:\n\n self._leftMousePressed = False\n\n if self._panning:\n\n self._panning = False\n\n if self._spacePressed:\n\n self.setCursor(Qt.OpenHandCursor)\n\n else:\n\n self.setCursor(Qt.BlankCursor)\n\n super(Display, self).mouseReleaseEvent(e)\n\n def mouseDoubleClickEvent(self, e):\n pass\n\n def mouseMoveEvent(self, e):\n\n if self._panning:\n new_pos = e.pos()\n diff = new_pos - self._dragPos\n self._dragPos = new_pos\n\n self.horizontalScrollBar().setValue(self.horizontalScrollBar().value() - diff.x())\n self.verticalScrollBar().setValue(self.verticalScrollBar().value() - diff.y())\n\n super(Display, self).mouseMoveEvent(e)\n\n def keyPressEvent(self, e):\n\n super(Display, self).keyPressEvent(e)\n\n if e.isAutoRepeat():\n return\n\n if e.key() == Qt.Key_Space and not self._leftMousePressed:\n self._spacePressed = True\n self.setCursor(Qt.OpenHandCursor)\n\n def keyReleaseEvent(self, e):\n\n super(Display, self).keyReleaseEvent(e)\n\n if e.isAutoRepeat():\n return\n\n if e.key() == Qt.Key_Space:\n self._spacePressed = False\n\n if not self._spacePressed and not self._panning:\n self.setCursor(Qt.BlankCursor)\n\n self.update()\n\n def wheelEvent(self, e):\n\n focus_point = self.mapToScene(e.pos())\n\n self._lastFocusPoint.setX(round(focus_point.x()))\n self._lastFocusPoint.setY(round(focus_point.y()))\n\n steps = e.angleDelta().y() / 120\n\n if steps == 0 or (steps > 0 and self.zoom > 32.0) or (steps < 0 and self.zoom < 0.1):\n e.ignore()\n return\n\n if steps > 1.0:\n steps = 1.0\n\n if steps < -1.0:\n steps = -1.0\n\n scale = pow(2.0, steps)\n\n self.zoom_by(scale)\n\n def paintEvent(self, e):\n\n super(Display, self).paintEvent(e)\n\n print('DRAWING')\n\n if self._spacePressed or self._panning:\n return\n\n painter = QPainter(self.viewport())\n\n self.draw_over_display(painter)\n\n '''\n Draw over display with no 
transformation\n    '''\n    def draw_over_display(self, painter):\n        pass","repo_name":"rafaelvasco/SpriteMator","sub_path":"src/view/display_base_widget.py","file_name":"display_base_widget.py","file_ext":"py","file_size_in_byte":7827,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"74619213706","text":"import tensorflow as tf\nimport numpy as np\nimport os\ntf.app.flags.DEFINE_integer('training_iteration', 1000,'number of training iterations.') # config param - number of training iterations\ntf.app.flags.DEFINE_integer('model_version', 1, 'version number of the model.') # config param - model version\ntf.app.flags.DEFINE_string('work_dir', 'model/', 'Working directory.') # config param - model storage path\nFLAGS = tf.app.flags.FLAGS\n# create the model storage directory if it does not exist\nif(not os.path.exists(FLAGS.work_dir)):\n    os.makedirs(FLAGS.work_dir)\n\n\n\nsess = tf.InteractiveSession()\n \nx = tf.placeholder('float', shape=[None, 5],name=\"inputs\") # create the input layer: 5-dim float\ny_ = tf.placeholder('float', shape=[None, 1]) # create the output layer: 1-dim float\nw = tf.get_variable('w', shape=[5, 1], initializer=tf.truncated_normal_initializer) # create and initialize the layer weights w\nb = tf.get_variable('b', shape=[1], initializer=tf.zeros_initializer) # create and initialize the layer bias b\n\nsaver = tf.train.Saver()\n\nsess.run(tf.global_variables_initializer())\n\n#saver.save(sess, './model/mylinermodel.ckpt',global_step=100,write_meta_graph=False)\n\n\ny = tf.add(tf.matmul(x, w) , b,name=\"outputs\") # forward pass\nms_loss = tf.reduce_mean((y - y_) ** 2) # mean squared error loss\ntrain_step = tf.train.GradientDescentOptimizer(0.005).minimize(ms_loss) # gradient descent update\ntrain_x = np.random.randn(1000, 5)\n# let the model learn the equation of y = x1 * 1 + x2 * 2 + x3 * 3\ntrain_y = np.sum(train_x * np.array([1, 2, 3,4,5]) + np.random.randn(1000, 5) / 100, axis=1).reshape(-1, 1)\nfor i in range(FLAGS.training_iteration):\n    loss, _ = sess.run([ms_loss, train_step], feed_dict={x: train_x, y_: train_y})\n    if i%100==0:\n        print(\"loss is:\",loss)\n        #graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,[\"inputs\", \"outputs\"]) # freeze variables as constants\n        #tf.train.write_graph(graph, \".\", FLAGS.work_dir + \"liner.pb\",as_text=False) # save the model as a .pb file\n        #saver.save(sess, './model/mylinermodel-'+str(i))\n        saver.save(sess,'./model/linermodel.ckpt')\ntf.train.write_graph(sess.graph_def,FLAGS.work_dir,\"linermodel.pbtxt\",as_text=True)\nprint('Done exporting!')\nprint('Done training!')\n","repo_name":"JWei-D/test-of-tensorflow-c","sub_path":"codes/demotest.py","file_name":"demotest.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1648582214","text":"from Tkinter import *\n\nclass StatusBar(Frame):\n    def __init__(self, master):\n        Frame.__init__(self, master)\n        self.label = Label(self, bd=1, relief=SUNKEN, anchor=W)\n        self.label.pack(fill=X)\n\n    def set(self, format, *args):\n        self.label.config(text=format % args)\n        self.label.update_idletasks()\n\n    def clear(self):\n        self.label.config(text=\"\")\n        self.label.update_idletasks()\n\nroot = Tk()\nstatus = StatusBar(root)\nstatus.pack(side=BOTTOM, fill=X)\nstatus.set(\"This is status bar...\")\nroot.mainloop()","repo_name":"dizengrong/my_code","sub_path":"tk/statusbar.py","file_name":"statusbar.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37160369648","text":"import numpy as np\r\nimport os\r\nfrom bert4keras.backend import keras, set_gelu, K\r\nfrom bert4keras.tokenizers import 
Tokenizer\r\nfrom bert4keras.models import build_transformer_model\r\nfrom bert4keras.optimizers import Adam\r\nfrom bert4keras.snippets import sequence_padding, DataGenerator\r\nfrom bert4keras.snippets import open\r\nfrom keras.layers import Dropout, Dense\r\nfrom tqdm import tqdm\r\n\r\nset_gelu('tanh') \r\nline_label = {0: 'experience', 1: 'knowledge', 2: 'education', 3: 'project', 4: 'others'}\r\nlabel2index = {v:k for k,v in line_label.items()}\r\n\r\nprint(label2index)\r\n\r\nmaxlen = 128\r\nbatch_size = 16\r\nconfig_path = 'E:/bert4keras-master/wwm_uncased_L-24_H-1024_A-16/bert_config.json'\r\ncheckpoint_path = 'E:/bert4keras-master/wwm_uncased_L-24_H-1024_A-16/bert_model.ckpt'\r\ndict_path = 'E:/bert4keras-master/wwm_uncased_L-24_H-1024_A-16/vocab.txt'\r\n\r\n\r\n\r\n\r\ndef load_text_label_pairs(data_dir_path, label_type=None):\r\n if label_type is None:\r\n label_type = 'line_type'\r\n\r\n result = []\r\n\r\n for f in os.listdir(data_dir_path):\r\n data_file_path = os.path.join(data_dir_path, f)\r\n if os.path.isfile(data_file_path) and f.lower().endswith('.txt'):\r\n with open(data_file_path, mode='rt', encoding='utf8') as file:\r\n for line in file:\r\n \r\n line_type, line_label, sentence = line.strip().split('\\t')\r\n if line_label not in label2index.keys():\r\n continue\r\n if label_type == 'line_type':\r\n result.append((sentence, line_type))\r\n else:\r\n result.append((sentence, label2index[line_label]))\r\n return result\r\n\r\n\r\n\r\ntrain_data = load_text_label_pairs('E:/bert4keras-master/resume_data_73/train', label_type='line_label')\r\nvalid_data = load_text_label_pairs('E:/bert4keras-master/resume_data_73/valid', label_type='line_label')\r\ntest_data = load_text_label_pairs('E:/bert4keras-master/resume_data_73/test', label_type='line_label')\r\n\r\nlist_shape = np.array(train_data).shape\r\nprint('trainsize',list_shape)\r\n\r\nlist_shapev = np.array(valid_data).shape\r\nprint('validsize',list_shapev)\r\n\r\nlist_shapet = np.array(test_data).shape\r\nprint('testsize',list_shapet)\r\n\r\ntokenizer = Tokenizer(dict_path, do_lower_case=True)\r\n\r\n\r\nclass data_generator(DataGenerator):\r\n\r\n def __iter__(self, random=False):\r\n batch_token_ids, batch_segment_ids, batch_labels = [], [], []\r\n for is_end, (sentence, line_label) in self.sample(random):\r\n token_ids, segment_ids = tokenizer.encode(\r\n sentence, maxlen=maxlen\r\n )\r\n batch_token_ids.append(token_ids)\r\n batch_segment_ids.append(segment_ids)\r\n batch_labels.append([line_label])\r\n if len(batch_token_ids) == self.batch_size or is_end:\r\n batch_token_ids = sequence_padding(batch_token_ids)\r\n batch_segment_ids = sequence_padding(batch_segment_ids)\r\n batch_labels = sequence_padding(batch_labels)\r\n yield [batch_token_ids, batch_segment_ids], batch_labels\r\n batch_token_ids, batch_segment_ids, batch_labels = [], [], []\r\n\r\n\r\n# input Pre-training model\r\nbert = build_transformer_model(\r\n config_path=config_path,\r\n checkpoint_path=checkpoint_path,\r\n with_pool=True,\r\n return_keras_model=False,\r\n)\r\n\r\noutput = Dropout(rate=0.1)(bert.model.output)\r\noutput = Dense(\r\n units=5, activation='softmax', kernel_initializer=bert.initializer\r\n)(output)\r\n\r\nmodel = keras.models.Model(bert.model.input, output)\r\nmodel.summary()\r\n\r\nmodel.compile(\r\n loss='sparse_categorical_crossentropy',\r\n optimizer=Adam(1e-5), \r\n # optimizer=PiecewiseLinearLearningRate(Adam(5e-5), {10000: 1, 30000: 0.1}),\r\n metrics=['accuracy'],\r\n)\r\n\r\n# dataset transformer\r\ntrain_generator = 
data_generator(train_data, batch_size)\r\nvalid_generator = data_generator(valid_data, batch_size)\r\ntest_generator = data_generator(test_data, batch_size)\r\n\r\nprint(train_generator)\r\n\r\n\r\n#confuse matrix\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport itertools\r\n\r\ndef plot_confusion_matrix(cm,\r\n target_names,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Greys,\r\n normalize=True):\r\n \r\n \r\n accuracy = np.trace(cm) / float(np.sum(cm))\r\n misclass = 1 - accuracy\r\n\r\n if cmap is None:\r\n cmap = plt.get_cmap('Blues')\r\n\r\n plt.figure(figsize=(5, 4))\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n #plt.colorbar()\r\n\r\n if target_names is not None:\r\n tick_marks = np.arange(len(target_names))\r\n plt.xticks(tick_marks, target_names, rotation=45)\r\n plt.yticks(tick_marks, target_names)\r\n\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n\r\n\r\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n if normalize:\r\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n else:\r\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))\r\n plt.show()\r\n# show confuse matrix\r\ndef plot_confuse(model,data):\r\n all_preds = np.array([])\r\n all_true = np.array([])\r\n for x_true, y_true in data:\r\n\r\n y_pred = model.predict(x_true).argmax(axis=1)\r\n y_true = y_true[:, 0]\r\n \r\n all_preds = np.concatenate((all_preds, y_pred))\r\n all_true = np.concatenate((all_true, y_true))\r\n conf_mat = confusion_matrix(all_true, all_preds)\r\n plt.figure()\r\n plot_confusion_matrix(conf_mat, normalize=False,target_names=labels,title='Confusion Matrix')\r\n#labels list\r\nlabels=['experience','knowledge','education','project','others']\r\n\r\n\r\nimport numpy as np\r\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score,f1_score,classification_report\r\ndef evaluate(data):\r\n pre,recall, right = 0., 0., 0.\r\n all_preds = np.array([])\r\n all_true = np.array([])\r\n for x_true, y_true in data:\r\n\r\n y_pred = model.predict(x_true).argmax(axis=1)\r\n y_true = y_true[:, 0]\r\n \r\n all_preds = np.concatenate((all_preds, y_pred))\r\n all_true = np.concatenate((all_true, y_true))\r\n #pre += len(y_true)\r\n #recall += (len(y_true)+len(x_true))\r\n #right += (y_true == y_pred).sum()\r\n \r\n acc = precision_score(all_true, all_preds,average='weighted')\r\n recall = recall_score(all_true, all_preds, average='micro')\r\n f1score = f1_score(all_true, all_preds, average='weighted')\r\n print('acc',acc)\r\n print('recall',recall)\r\n print('f1score',f1score)\r\n \r\n print(classification_report(all_true, all_preds,digits=4))\r\n \r\n \r\n return recall\r\n #return (2*(right/pre)*(right/recall))/(right/pre+right/recall)\r\n\r\n\r\nclass Evaluator(keras.callbacks.Callback):\r\n\r\n def __init__(self):\r\n self.best_val_acc = 0.\r\n\r\n def on_epoch_end(self, epoch, logs=None):\r\n val_acc = evaluate(valid_generator)\r\n if val_acc > self.best_val_acc:\r\n self.best_val_acc = val_acc\r\n 
model.save_weights('best_model.weights')\r\n test_acc = evaluate(test_generator)\r\n print(\r\n u'val_acc: %.5f, best_val_acc: %.5f, test_acc: %.5f\\n' %\r\n (val_acc, self.best_val_acc, test_acc)\r\n )\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n evaluator = Evaluator()\r\n\r\n model.fit(\r\n train_generator.forfit(),\r\n steps_per_epoch=len(train_generator),\r\n epochs=20,\r\n callbacks=[evaluator]\r\n )\r\n\r\n model.load_weights('best_model.weights')\r\n print(u'final test acc: %05f\\n' % (evaluate(test_generator)))\r\n plot_confuse(model, test_generator)\r\nelse:\r\n\r\n model.load_weights('best_model.weights')","repo_name":"ganchengguang/improve_keras_english_resume_IE","sub_path":"BERT_Model/bert train.py","file_name":"bert train.py","file_ext":"py","file_size_in_byte":8343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19185913386","text":"import os\nimport shutil\nfrom pathlib import Path\nfrom typing import Generator, IO\nfrom uuid import UUID, uuid4\n\nimport aiofiles\nfrom fastapi import UploadFile, HTTPException\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom starlette.requests import Request\n\nfrom backend.crud import CRUDVideo, CRUDVideoLike\nfrom backend.exceptions import VideoNotFoundException\nfrom backend.models import UserDB, VideoDB, VideoLikeDB\nfrom backend.schemas import UploadVideo, GetVideo, CreateLikeOnVideo\nfrom config import VIDEO_STORAGE_PATH, PREVIEW_STORAGE_PATH\nfrom backend.services.celery_tasks import _delete_video_file\n\n\nclass VideoService:\n async def save_video(\n self,\n user: UserDB,\n file: UploadFile,\n title: str,\n description: str | None,\n preview: UploadFile | None,\n session: AsyncSession\n ) -> GetVideo:\n video_path = self._generate_video_path(user.id, file.content_type.split(\"/\")[1])\n preview_path = None\n if file.content_type == 'video/mp4':\n await self._async_write_file(video_path, file)\n else:\n raise HTTPException(status_code=418, detail='Video isn\\'t mp4')\n\n if preview.content_type == 'application/octet-stream':\n preview = None\n\n if preview:\n if preview.content_type.split('/')[0] != 'image':\n raise HTTPException(status_code=418, detail='Preview isn\\'t image')\n else:\n preview_path = self._generate_preview_path(preview.filename)\n await self._async_write_file(preview_path, preview)\n\n video = UploadVideo(\n title=title,\n description=description,\n file=video_path,\n user=user.id,\n preview=preview_path.split('/')[-1] if preview else None,\n )\n crud_video = CRUDVideo(VideoDB, session)\n video = await crud_video.create(video)\n return GetVideo.model_validate(video)\n\n @staticmethod\n def _generate_video_path(user_id: UUID, file_format: str):\n return f'{VIDEO_STORAGE_PATH}/{user_id}_{uuid4()}.{file_format}'\n\n @staticmethod\n def _generate_preview_path(file_name: str):\n return f'{PREVIEW_STORAGE_PATH}/{uuid4()}_{file_name}'\n\n @staticmethod\n def _write_file(path: str, file: UploadFile):\n with open(path, 'wb') as file_obj:\n shutil.copyfileobj(file.file, file_obj)\n\n async def _async_write_file(self, path: str, file: UploadFile):\n self._create_storage_if_not_exists()\n async with aiofiles.open(path, 'wb') as file_obj:\n data = await file.read()\n await file_obj.write(data)\n\n @staticmethod\n def _create_storage_if_not_exists():\n if not os.path.exists(VIDEO_STORAGE_PATH):\n os.mkdir(VIDEO_STORAGE_PATH)\n if not os.path.exists(PREVIEW_STORAGE_PATH):\n os.mkdir(PREVIEW_STORAGE_PATH)\n\n async def delete_video(self, video_id, session: 
AsyncSession) -> GetVideo | None:\n crud_video = CRUDVideo(VideoDB, session)\n video = await crud_video.delete(video_id)\n if video:\n _delete_video_file.apply_async(args=(video.file,), countdown=6)\n return GetVideo.model_validate(video)\n\n @staticmethod\n async def get_video(video_id: int, session: AsyncSession) -> GetVideo | None:\n crud_video = CRUDVideo(VideoDB, session)\n video = await crud_video.get(video_id)\n if video:\n return GetVideo.model_validate(video)\n\n @staticmethod\n async def get_videos_by_user(user_id: UUID, session: AsyncSession) -> list[GetVideo]:\n crud_video = CRUDVideo(VideoDB, session)\n videos = await crud_video.get_all(user_id)\n return [GetVideo.model_validate(video) for video in videos]\n\n @staticmethod\n async def get_all_videos(session: AsyncSession) -> list[GetVideo]:\n crud_video = CRUDVideo(VideoDB, session)\n videos = await crud_video.get_all()\n return [GetVideo.model_validate(video) for video in videos]\n\n @staticmethod\n def _ranged(\n file: IO[bytes],\n start: int = 0,\n end: int = None,\n block_size: int = 8192,\n ) -> Generator[bytes, None, None]:\n consumed = 0\n\n file.seek(start)\n while True:\n data_length = min(block_size, end - start - consumed) if end else block_size\n if data_length <= 0:\n break\n data = file.read(data_length)\n if not data:\n break\n consumed += data_length\n yield data\n\n if hasattr(file, 'close'):\n file.close()\n\n async def open_file(self, video_id: int, request: Request, session: AsyncSession):\n video = await self.get_video(video_id, session)\n if not video:\n raise VideoNotFoundException()\n\n path = Path(video.file)\n file = path.open('rb')\n\n file_size = path.stat().st_size\n\n content_length = file_size\n status_code = 200\n headers = {}\n content_range = request.headers.get('range')\n\n if content_range:\n content_range = content_range.strip().lower()\n content_ranges = content_range.split('=')[-1]\n range_start, range_end, *_ = map(str.strip, content_ranges.split('-'))\n range_start = int(range_start) if range_start else 0\n range_end = min(file_size - 1, int(range_end)) if range_end else file_size - 1\n content_length = range_end - range_start + 1\n file = self._ranged(file, start=range_start, end=range_end + 1)\n status_code = 206\n headers['Content-Range'] = f'bytes {range_start}-{range_end}/{file_size}'\n\n return file, status_code, content_length, headers\n\n @staticmethod\n async def add_or_delete_like(\n video_id: int,\n session: AsyncSession,\n current_user: UserDB,\n ) -> GetVideo:\n crud_video = CRUDVideo(VideoDB, session)\n crud_like = CRUDVideoLike(VideoLikeDB, session)\n like = CreateLikeOnVideo(video=video_id, user=current_user.id)\n like_db = await crud_like.get_like(like)\n if like_db:\n video = await crud_video.delete_like(video_id)\n if not video:\n raise VideoNotFoundException()\n await crud_like.delete(like_db.id)\n else:\n video = await crud_video.add_like(video_id)\n if not video:\n raise VideoNotFoundException()\n await crud_like.create(like)\n return GetVideo.model_validate(video)\n\n\n# @celery.task\n# def _delete_video_file(file_name: str) -> None:\n# os.remove(file_name)\n","repo_name":"baltikaa9/ZXCTube","sub_path":"backend/services/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":6730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74885972426","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport gettext\nimport subprocess\n\nfrom .data import 
zones\n\ngettext.textdomain('whois2')\nlocale_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'locale')\n_ = gettext.translation('whois2', locale_path, fallback=True).gettext\n\nDEFAULT_CACHE_TIMEOUT = 600\n\nRU_SUBDOMAINS = filter(lambda zone: re.match(r'^(\\w+\\.(ru$|su$)|ru.net$)', zone), zones)\n\n\nclass WhoisDomainBase(object):\n invalid = False\n\n def ascii_domain(self):\n \"\"\"\n Return domain name in ASCII (xn notation)\n \"\"\"\n return normalize_domain_name(self.domain)\n\n def unicode_domain(self):\n \"\"\"\n Return domain name in unicode format\n \"\"\"\n return self.ascii_domain().decode('idna')\n\n def is_idna(self):\n \"\"\"\n Return True if domain name contains non-latin symbols\n \"\"\"\n return is_idna(self.ascii_domain())\n\n\nclass WhoisDomain(WhoisDomainBase):\n \"\"\"\n whois2.check(..) result\n \"\"\"\n def __init__(self, domain, whois_data):\n self.domain = domain\n self.whois_data = whois_data\n\n\nclass WhoisDomainInvalid(WhoisDomainBase):\n \"\"\"\n whois2.check(..) result in case when domain name is invalid\n \"\"\"\n invalid = True\n def __init__(self, domain, validation_errors):\n self.domain = domain\n self.validation_errors = validation_errors\n\n\ndef get_whois(domain, whois_server=None, cache=None, cache_timeout=None):\n \"\"\"\n Get whois information from remote domain in plain text format\n\n :param domain: domain name which can be a valid string, IDN-encoded if necessary.\n :param cache: a cache object having two methods: set(key, value, timeout)\n and get(key), can be None, if you don't intend to use caching mechanism\n :param cache_timeout: cache timeout (in seconds)\n\n :returns: the string with the whois information about the domain\n :raises: RuntimeError (if \"whois\" command line utility returns with non-zero and non-one status)\n \"\"\"\n cmd = ['whois', '-H', domain]\n if whois_server:\n cmd += ['-h', whois_server]\n cache_key = ':'.join((domain, whois_server or ''))\n\n out = None\n if cache:\n out = cache.get(cache_key)\n if out is None:\n pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = pipe.communicate()\n if pipe.returncode in (0, 1):\n if cache:\n cache.set(cache_key, out, cache_timeout or DEFAULT_CACHE_TIMEOUT)\n return out\n error_text = ['cmd > {0}'.format(' '.join(cmd)), ]\n if out:\n error_text += ['out > {0}'.format(line) for line in out.splitlines()]\n if err:\n error_text += ['err > {0}'.format(line) for line in err.splitlines()]\n raise RuntimeError('\\n'.join(error_text))\n else:\n return out\n\n\ndef normalize_domain_name(domain_name):\n \"\"\"\n Normalized domain name\n \"\"\"\n return domain_name.replace(' ', '').strip().lower().encode('idna')\n\n\ndef is_idna(domain_name):\n \"\"\"\n IDNA check\n\n Return true is domain (either unicode or its idna representation) is in fact\n contains prohibited symbols which have to be IDNA encoded\n \"\"\"\n domain_name = domain_name.strip().lower()\n idna_domain = domain_name.encode('idna')\n return idna_domain.decode('idna') != idna_domain\n\n\ndef unicodify(domain_name):\n \"\"\"\n Convert any domain name (IDNA or unicode representation) to unicode\n \"\"\"\n return domain_name.encode('idna').decode('idna')\n","repo_name":"NetAngels/whois2","sub_path":"whois2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"19585755080","text":"from __future__ import (\n absolute_import,\n division,\n 
print_function,\n unicode_literals,\n)\n\nimport csv\nimport json\nimport os\nimport re\nfrom io import open\n\nfrom ansible.errors import AnsibleParserError\nfrom ansible.module_utils._text import to_bytes, to_text\nfrom ansible.plugins.inventory import BaseInventoryPlugin\n\n__metaclass__ = type\n\nDOCUMENTATION = '''\n inventory: csv\n short_description: Uses CSV files and inventory source.\n description: |\n - CSV file based inventory, lines are split into groups by a blank line.\n - Each group of lines can either be a list of hosts or a list of group var definitions.\n - Hosts are listed with the following required fields (C(hostname), C(host_address))\n - Host groups are specified in group columns e.g. C(group 1), C(group 2) etc.\n - Host vars are listed in individual colums with headers formatted as follows - C({type}.var: {name}).\n - The 'type' field must be one of the following (or blank for the default)\n - S: string (default if no type specified)\n - B: boolean ('true' or 't' (case insensitive), anything else is false)\n - I: integer\n - F: float\n - L: list (formatted as JSON)\n - H: host (resolve to an ansible host. Only supported in host lists within the same block)\n - Group vars can be defined in two formats. The first is similar to hosts where each row is a group\n and columns are variables. The second format has 4 columns, C(group), C(var), C(val), C(type) and\n each row represents a single variable value for a single group.\n - In both formats the first column header must be C(group).\n notes:\n - \"The hostname of a host defaults to the value in the 'hostname' column but it can be overridden by\n adding a C(var: hostname) var column.\"\n - If this is done then the value in the C(hostname) column will be set as the value of the C(alt_hostname) var.\n'''\n\nEXAMPLES = '''\n example1: |\n # host list with groups and host vars\n hostname, host_address, group 1, group 2, var: v1, I.var: v2, L.var: v3\n host1, 192.168.33.21, web, , val1, 23,\n host2, 192.168.33.22, proxy, nginx, val2, 23,\n host3, 192.168.33.23, db, lvm, val3, , [\"v1\", \"v2\"]\n\n # group vars definition table (row format)\n group, var, val, type\n web, gvar1, 1, I\n web, gvar2, foo, S\n nginx, nginx_port, 80, I\n all, whitelist, [\"a\", \"b\"], L\n\n example2: |\n # separate groups of hosts can have their own headers\n hostname, host_address, group 1, group 2, var: v1\n host1, 192.168.33.21, web, , val1\n host2, 192.168.33.22, proxy, nginx, val2\n\n # comments are ignored\n # groups are separated by a blank line (or a line where the first cell is cells)\n hostname, host_address, group 1, I.var: db_port, var: root_path\n host3, 192.168.33.23, db, 1234, /opt/data/db\n \n # group table (column format)\n group, var: port, var: relay\n g1, 9010, True\n g2, 8000, False\n'''\n\n\n\n\nTYPE_STRING = 'S'\nTYPE_INTEGER = 'I'\nTYPE_BOOLEAN = 'B'\nTYPE_FLOAT = 'F'\nTYPE_LIST = 'L'\nTYPE_HOST = 'H'\n\n\nclass InventoryModule(BaseInventoryPlugin):\n \"\"\"Host inventory parser for ansible using csv files.\"\"\"\n\n NAME = 'csv'\n\n def __init__(self):\n super(InventoryModule, self).__init__()\n\n self._hosts = set()\n\n def verify_file(self, path):\n \"\"\"Verify if file is usable by this plugin, base does minimal accesability check\"\"\"\n valid = False\n if super(InventoryModule, self).verify_file(path):\n file_name, ext = os.path.splitext(path)\n if not ext or ext in ('.csv', '.CSV'):\n valid = True\n return valid\n\n def parse(self, inventory, loader, path, cache=False):\n super(InventoryModule, 
self).parse(inventory, loader, path)\n\n try:\n if self.loader:\n (b_data, private) = self.loader._get_file_contents(path)\n else:\n b_path = to_bytes(path, errors='surrogate_or_strict')\n with open(b_path, 'rb') as fh:\n b_data = fh.read()\n\n # Faster to do to_text once on a long string than many\n # times on smaller strings\n data = to_text(b_data, errors='surrogate_or_strict').splitlines()\n\n self._parse(data)\n except Exception as e:\n raise AnsibleParserError(e)\n\n def _parse(self, lines):\n row_groups = self._parse_row_groups(lines)\n for row_group in row_groups:\n rows = list(csv.DictReader(row_group))\n if 'hostname' in rows[0]:\n self._parse_hosts(rows)\n elif 'group' in rows[0]:\n self._parse_groups(rows)\n\n def _parse_hosts(self, rows):\n hosts_aliases = {\n row['hostname']: row['host_address']\n for row in rows\n }\n\n for row in rows:\n groups = self._get_host_groups(row)\n host_vars = self._get_host_vars(row, hosts_aliases)\n host = row['host_address']\n host_group = row['hostname']\n self.inventory.add_group(host_group)\n self.inventory.add_host(host, host_group)\n for group in groups:\n self.inventory.add_group(group)\n self.inventory.add_child(group, host_group)\n\n self._populate_host_vars([host], host_vars)\n\n def _get_host_groups(self, row):\n return [\n val.strip() for key, val in row.items()\n if key.startswith('group') and val.strip()\n ]\n\n def _get_host_vars(self, row, hosts_aliases):\n vars = {}\n for key, raw_val in row.items():\n raw_val = raw_val.strip()\n if 'var' in key and raw_val:\n item_type, name = key.split('.') if '.' in key else ('S', key)\n parts = re.split(r'var:\\s*', name)\n if len(parts) != 2:\n raise AnsibleParserError('Unable to parse varible name: \"{}\"'.format(name))\n name = parts[1]\n vars[name] = conv_str2value(item_type, raw_val, hosts_aliases)\n if 'hostname' not in vars:\n vars['hostname'] = row['hostname']\n else:\n vars['alt_hostname'] = row['hostname']\n return vars\n\n def _parse_groups(self, rows):\n if 'var' in rows[0] and 'val' in rows[0]:\n # row format\n for row in rows:\n group = row['group']\n self.inventory.add_group(group)\n var_name, item_type, raw_val = row['var'], row['type'], row['val'].strip()\n self.inventory.set_variable(group, var_name, conv_str2value(item_type, raw_val))\n else:\n # column format\n for row in rows:\n group = row['group']\n del row['group']\n self.inventory.add_group(group)\n for key, raw_val in row.items():\n raw_val = raw_val.strip()\n if 'var' in key and raw_val:\n item_type, name = key.split('.') if '.' 
in key else ('S', key)\n name = name.split(' ')[1]\n self.inventory.set_variable(group, name, conv_str2value(item_type, raw_val))\n\n def _parse_row_groups(self, csv_lines):\n \"\"\"Parse CSV lines into groups each with their own header column\"\"\"\n row_groups = []\n current_group = []\n for line in csv_lines:\n if line.startswith('#'):\n continue\n if not line.strip() or line.startswith(','):\n if current_group:\n row_groups.append(current_group)\n current_group = []\n else:\n current_group.append(line)\n if current_group:\n row_groups.append(current_group)\n return row_groups\n\n\ndef conv_str2value(item_type, item, hosts_aliases=None):\n \"\"\"\n Convert a character string to a specified data type.\n\n :param string item_type: A character string representing the type of item data.\n :param string item: Value of item data.\n :return: The converted value.\n \"\"\"\n\n if len(item) <= 0:\n return None\n\n if TYPE_STRING == item_type:\n return item\n elif TYPE_INTEGER == item_type:\n return int(item)\n elif TYPE_BOOLEAN == item_type:\n item = item.lower()\n return item in ('true', 't')\n elif TYPE_FLOAT == item_type:\n return float(item)\n elif TYPE_LIST == item_type:\n return json.loads(item)\n elif TYPE_HOST == item_type:\n if hosts_aliases is None:\n raise AnsibleParserError(\"Var of type host not supported: {}\".format(item))\n return hosts_aliases.get(item, item)\n\n return item\n","repo_name":"dimagi/commcare-cloud","sub_path":"src/commcare_cloud/ansible/plugins/inventory/csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":9118,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"81"} +{"seq_id":"33298132295","text":"from __future__ import print_function # In python 2.7\nimport osmnx as ox\nfrom flask import Flask, redirect, url_for, render_template, request, session\nimport folium\nimport geocoder\nimport Model as Model\nimport time\ntime_start = time.time()\n\n\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html') \n\n\n@app.route(\"/map\")\ndef map():\n return render_template('map.html')\n\n\n@app.route(\"/preference\", methods=[\"POST\"])\ndef preference():\n address = request.form['address']\n print(address)\n location = geocoder.osm(address)\n if location.lat is None:\n error_statement = \"Please enter a valid address.\"\n return render_template(\"index.html\", error_statement=error_statement)\n print(location)\n print(type(location))\n print(location.lat)\n print(location.lng)\n latitude = location.lat\n longitude = location.lng\n my_map = folium.Map(location=(latitude, longitude), zoom_start=100, width='100%', height='55%')\n iframe = folium.IFrame(address,\n width=100,\n height=50)\n\n popup = folium.Popup(iframe,\n max_width=200)\n\n marker = folium.Marker([latitude, longitude],\n popup=popup).add_to(my_map)\n my_map.save('/Opti_Run_App/templates/gmap.html')\n return render_template('preference.html', address=address, latitude=latitude, longitude=longitude)\n\n\n@app.route(\"/route\", methods=[\"POST\"])\ndef route():\n street_crossing = request.form.get(\"street_crossing\")\n distance = request.form.get(\"distance\", type=float)\n print(distance)\n latitude = request.form.get(\"latitude\", type=float)\n longitude = request.form.get(\"longitude\", type=float)\n address = request.form.get(\"address\",type = str)\n print(\"address\")\n print(address)\n\n 
if not distance:\n error_statement = \"All Form Fields Required.\"\n return render_template(\"preference.html\", error_statement=error_statement, latitude=latitude, longitude=longitude, address=address)\n if distance < 0:\n error_statement = \"Must enter a distance greater than 0.\"\n return render_template(\"preference.html\", error_statement=error_statement, latitude=latitude, longitude=longitude, address=address)\n\n my_tup = Model.model_builder(latitude, longitude, distance, address)\n return render_template(my_tup[0], latitude=latitude, longitude=longitude,\n distance=my_tup[1], street_crossing=street_crossing)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","repo_name":"ylevanon/OptiRun","sub_path":"Flask_App.py","file_name":"Flask_App.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21796515351","text":"class Solution:\n def trap(self, height: List[int]) -> int:\n n = len(height)\n\n ans = 0\n l = [0] * n # l[i] := max(height[0..i])\n r = [0] * n # r[i] := max(height[i..n))\n\n for i in range(n):\n l[i] = height[i] if i == 0 else max(height[i], l[i - 1])\n\n for i in range(n - 1, -1, -1):\n r[i] = height[i] if i == n - 1 else max(height[i], r[i + 1])\n\n for i in range(n):\n ans += min(l[i], r[i]) - height[i]\n\n return ans\n","repo_name":"problems-solving/LeetCode","sub_path":"solutions/0042. Trapping Rain Water/0042.py","file_name":"0042.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"23755273815","text":"from rest_framework import status\nfrom rest_framework import generics\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom .exceptions import UserExistsException\nfrom rest_api.models import Post\nfrom rest_api.serializers import PostSerializer, RegisterSerializer\nfrom django.contrib.auth.models import User\n\n\nclass RegisterView(generics.GenericAPIView):\n permission_classes = (AllowAny,)\n serializer_class = RegisterSerializer\n\n def post(self, request):\n serializer = self.get_serializer(data=request.data)\n\n try:\n if serializer.is_valid():\n serializer.save()\n return Response({'status': 'user_create'})\n else:\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n except UserExistsException:\n return Response(\n {'status': 'user_exists'},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n\nclass PostViewSet(viewsets.ModelViewSet):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n permission_classes = (IsAuthenticated,)\n\n def post(self, request, pk):\n post = Post.objects.get(id=pk)\n response_data = {'status': None}\n\n if request.user in User.objects.all():\n if 'action' in request.data:\n if request.data['action'] == 'unlike':\n post.like.remove(request.user.id)\n response_data['status'] = request.data['action']\n\n return Response(response_data)\n if request.data['action'] == 'like':\n post.like.add(request.user.id)\n response_data['status'] = request.data['action']\n\n return Response(response_data)\n return Response(\n {'status': 'bad request'},\n status=status.HTTP_400_BAD_REQUEST\n )\n","repo_name":"llCheDll/rest_api","sub_path":"src/rest_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"41611647144","text":"import json\n\nimport xmltodict\nfrom decouple import config\nimport hashlib\nfrom urllib.parse import urlencode\nimport requests\nimport uuid\n\nbbbURL = config(\"BBB_URL\")\nbbbSecret = config(\"BBB_SECRET\")\n\n\ndef create_meeting(user_name):\n meeting_id = uuid.uuid4().hex\n bbb_params = {\n 'name': 'Karostartup Meeting',\n 'meetingID': meeting_id,\n 'attendeePW': 'ap',\n 'moderatorPW': user_name,\n 'record': 'true',\n }\n create_meeting_string = urlencode(bbb_params)\n checksum = hashlib.sha1(('create' + create_meeting_string + bbbSecret).encode('utf-8')).hexdigest()\n create_meeting_url = '{host}/create?{params}&checksum={checksum}'.format(\n host=bbbURL, params=create_meeting_string, checksum=checksum)\n response = requests.get(create_meeting_url)\n data = xmltodict.parse(response.text)\n return json.dumps(data), meeting_id\n\n\ndef join_meeting(user_name, meeting_id):\n join_params = {\n 'fullName': user_name,\n 'meetingID': meeting_id,\n 'password': \"mp\"\n }\n create_meeting_string = urlencode(join_params)\n checksum = hashlib.sha1(('join' + create_meeting_string + bbbSecret).encode('utf-8')).hexdigest()\n join_meeting_url = '{host}/join?{params}&checksum={checksum}'.format(\n host=bbbURL, params=create_meeting_string, checksum=checksum)\n return join_meeting_url\n","repo_name":"nirmalpopat/TrainingFeedbackForm","sub_path":"utils/bbb.py","file_name":"bbb.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71022204746","text":"# Create your views here.\nfrom multiprocessing import context\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.contrib.auth.models import User\nfrom .forms import Quizform\nfrom .models import *\n# Create your views here.\ndef acceuil(request):\n context={}\n return render(request,'acceuil.html',context)\n\n\n\ndef kreyeKont(request):\n if request.method =='POST':\n nom=request.POST.get('nom')\n modpas=request.POST.get('modpas')\n User.objects.create_user(username=nom, password=modpas)\n return redirect(acceuil)\n\n return render(request,'kreyekont.html')\n\ndef konekte(request):\n if request.method =='POST':\n nom=request.POST.get('nom')\n modpas=request.POST.get('modpas')\n user=authenticate(username=nom ,password=modpas)\n if user is not None:\n login(request,user)\n return redirect(acceuil)\n \n return render(request,'konekte.html')\n\n\ndef kreyekwiz(request):\n form=Quizform()\n if request.method== 'POST':\n form=Quizform(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/')\n else:\n form=Quizform()\n\n context={'form':form}\n return render(request,'kreyekwiz.html',context)\n\ndef jwe(request):\n \"\"\"if request.method == 'POST':\"\"\"\n jwet=Quiz.objects.all()\n context={'jwet':jwet}\n return render(request,'jwee.html',context)\n\n\"\"\"for a in jwet:\n if a.vre_repons ==request.POST.get(a.question):\n print('ou bon')\n\n else:\n print('ou pa ok')\"\"\"\n\ndef krayete(request):\n ote=User.quiz.set.all()\n print(ote)\n\ndef dekonekte(request):\n logout(request)\n return redirect(acceuil)","repo_name":"Jeudy37/the-kwizz","sub_path":"Quiz_M/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30258725507","text":"from GeneticAlgorithm import GeneticAlgorithm\nimport os\n\ngenerationCount = 
0\nparams_dict = {\n \"dataset\": os.getcwd()+\"/nn/consolidation_dataset.csv\",\n \"populationSize\": 10,\n \"IsRegression\": True\n}\nga = GeneticAlgorithm(param_dict=params_dict)\n\nresults = ga.initialPopulation()\n\nprint(\"Generation: \"+str(generationCount))\nfor r in results:\n print(r.serialize())\n\nwhile generationCount < 10:\n results = ga.callback()\n print(\"Generation: \"+str(generationCount))\n for r in results:\n print(r.serialize()) \n generationCount += 1\n\nprint(\"Solution found.\")\nprint(ga.individuals[0].serialize())","repo_name":"atistech/master_thesis","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"4097765256","text":"import discord\nimport discord.components\nfrom player import downloader as dw\nfrom utils import errors\nfrom messages import message as ms\n\n\nclass TrackSelect(discord.ui.Select):\n def __init__(self, tracks: dict, vc, guild):\n super().__init__()\n self.options = [discord.SelectOption(label=track) for track in tracks]\n self.placeholder = 'Выберите трек'\n self.tracks = tracks\n self.vc = vc\n self.guild = guild\n\n async def callback(self, ctx):\n self.disabled = True\n track_id = self.tracks[self.values[0]]\n track = dw.Downloader.get_track_by_title(track_id)\n await ctx.response.edit_message(view=self.view)\n try:\n self.guild.add_track(track)\n except errors.FullQueue:\n await ctx.followup.send(ms.Message.full_queue())\n return\n\n is_playing = self.vc.is_playing()\n is_paused = self.vc.is_paused()\n if not is_playing and not is_paused:\n self.guild.play(self.vc)\n await ctx.followup.send(ms.Message.now_playing(track.name))\n else:\n await ctx.followup.send(ms.Message.added_to_queue(track.name))\n\n\n\nclass TrackSelectView(discord.ui.View):\n def __init__(self, tracks: list, *, vc, guild, timeout=100):\n super().__init__(timeout=timeout)\n self.add_item(TrackSelect(tracks, vc=vc, guild=guild))\n\n\n\n","repo_name":"un1i/discord-bot","sub_path":"player/message_components.py","file_name":"message_components.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13781248696","text":"\"\"\" home.settings.layout.separatorsheetsource\n\"\"\"\nfrom lxk_testlib.panelnav.core.enum import Action, Filter\nfrom lxk_testlib.panelnav.core.models.screen import Screen\n\n# Add screen and components below\nscreen = Screen()\n\nlocator = screen.add_locator(\"SEPARATOR SHEET SOURCE\", marker=True)\nlocator.area = \"ListWidget\"\nlocator.widget = \"textid='65772'\"\n\nlocator = screen.add_locator(\"OPTIONS LIST\")\nlocator.widget = \"ListWidget\"\n\nlocator = screen.add_locator(\"Tray 1\")\nlocator.area = \"ListWidget\"\nlocator.widget = \"textid='65633'\"\nlocator.add_action(\n action=Action.VERIFY,\n specifier=\"verify selected='%s'\"\n)\n","repo_name":"TrellixVulnTeam/CloudAutomation_KDSZ","sub_path":"venv/Lib/site-packages/lxk_testlib/panelnav/locators/panel28/home/settings/print/layout/separatorsheetsource/locators.py","file_name":"locators.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14440753475","text":"from pymongo import MongoClient\nfrom pymongo.errors import ConnectionFailure\nimport rospy\nfrom std_msgs.msg import String\nfrom nav2d_exploration.msg import Log\n\n \nclass DBManager:\n\n def 
__init__(self):\n        self.connected = False\n    \n    def connectToDB(self):\n        # keep a reference to the client so disconnectFromDB can close it\n        self.client = MongoClient(\"localhost\", 27017)\n        try:\n            self.client.admin.command('isMaster')\n            rospy.logdebug(\"[DBManager] Connected to mongoDB\")\n            self.connected = True\n            self.db = self.client.blueprintLogXPS_CLOSEDOOR\n            length = len(self.db.collection_names()) + 1\n            collectionName = \"run\" + str(length)\n            self.collection = self.db.create_collection(collectionName)\n        except ConnectionFailure:\n            self.connected = False\n            rospy.logerr(\"[DBManager] Error in connecting to MongoDB\")\n    \n    def isConnected(self):\n        return self.connected\n    \n    def disconnectFromDB(self):\n        self.client.close()\n        self.connected = False\n    \n    def saveToDB(self, log):\n        self.collection.insert_one({'Time': log.time, 'TravelledDistance': log.travelled_distance, 'Alpha': log.alpha, 'FrontierDistance': log.frontier_distance,\n                                    'FrontierInfoGainFloorplan': log.frontier_information_gain_floorplan, 'FrontierInfoGainNoFloorplan': log.frontier_information_gain_no_floorplan,\n                                    'FrontierUtility': log.frontier_utility, 'FrontierInfoGainMode': log.frontier_information_gain_mode,\n                                    'CoveragePercentage': log.coverage_percentage})\n        ","repo_name":"goldleaf3i/prior-maps-exploration","sub_path":"navigation_2d-master/nav2d_exploration/src/nav2d_exploration/db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"6977539299","text":"import tensorflow as tf\nfrom tensorflow.keras import losses\nimport numpy as np\nfrom random import shuffle\nfrom batch_generator import *\nfrom entity_main_model import entity_main_model\nimport matplotlib.pyplot as plt\nimport os\nimport pickle\nimport time\n\ntf.enable_eager_execution()\n\n\ndef reshape(a, i, rep):\n    a = np.expand_dims(a, axis=i)\n    return np.repeat(a, axis=i, repeats=rep)\n\n\ndef one_hot(a):\n    b = np.zeros((len(a), 2))\n    b[np.arange(len(a)), a] = 1\n    return reshape(b, 1, 20)\n\n\ndef get_loss(inputs, model, y_true, mask):\n    y_pred = model(inputs)\n    return tf.reduce_mean(\n        tf.multiply(losses.categorical_crossentropy(one_hot(y_true), y_pred),\n                    mask))\n\n\ndef grad(inputs, model, labels, mask):\n    with tf.GradientTape() as tape:\n        loss = get_loss(inputs, model, labels, mask)\n    return loss, tape.gradient(loss, model.variables)\n\n\ndef train_plot(train_loss_results, train_accuracy_results):\n    fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))\n    fig.suptitle('Train Metrics')\n\n    axes[0].set_ylabel(\"Loss\", fontsize=14)\n    axes[0].plot(train_loss_results)\n\n    axes[1].set_ylabel(\"Accuracy\", fontsize=14)\n    axes[1].plot(train_accuracy_results)\n    axes[1].set_xlabel(\"Epoch\", fontsize=14)\n\n\ndef train(model,\n          learning_rate,\n          num_epochs,\n          save_path,\n          batch_size,\n          train_steps=50,\n          validation_steps=1000):\n    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n    tfe = tf.contrib.eager\n    train_loss_results = []\n    train_accuracy_results = []\n    validation_accuracy_results = []\n    step = 0\n    loss_avg = tfe.metrics.Mean()\n    train_accuracy = tfe.metrics.Accuracy()\n    batchgenerator = batch_generator(batch_size)\n\n    for batch in batchgenerator:\n        #inputs = (seq_encoded, masks, keys_vals)\n        inputs = batch[:3]\n        keys_mask = batch[3]\n        labels = batch[4]\n        outputs = model(inputs)\n        loss, grads = grad(inputs, model, labels, keys_mask)\n        optimizer.apply_gradients(zip(grads, model.variables),\n                                  tf.train.get_or_create_global_step())\n        loss_avg(loss)\n        train_accuracy(\n            
np.argmax(np.apply_along_axis(np.bincount, 1,\n np.argmax(outputs.numpy(), axis=2)),\n axis=1), labels)\n step += 1\n\n if step % train_steps == 0:\n train_loss_results.append(loss_avg.result().numpy())\n train_accuracy_results.append(train_accuracy.result().numpy())\n print(\"Step \" + str(step) + \": \" + \"Train Loss= \" +\n str(loss_avg.result().numpy()) + \", Train Accuracy= \" +\n str(train_accuracy.result().numpy()))\n loss_avg = tfe.metrics.Mean()\n train_accuracy = tfe.metrics.Accuracy()\n\n if step % validation_steps == 0:\n validation_accuracy = tfe.metrics.Accuracy()\n validationgenerator = validation_generator(batch_size)\n for validation in validationgenerator:\n x = validation[:3]\n y = validation[4]\n print(model(x).numpy().shape)\n validation_accuracy(\n np.argmax(np.apply_along_axis(\n np.bincount, 1, np.argmax(model(x).numpy(),\n axis=2)),\n axis=1), y)\n\n validation_accuracy_results.append(\n validation_accuracy.result().numpy())\n print(\"Validation Accuracy= \" +\n str(validation_accuracy.result().numpy()))\n\n checkpoint_dir = save_path\n os.makedirs(checkpoint_dir, exist_ok=True)\n checkpoint_dir = os.path.join(checkpoint_dir, 'ckpt')\n tfe.Saver(model.variables).save(checkpoint_dir)\n\n with open('train_accuracy_results.pkl', 'wb') as pkl:\n pickle.dump(train_accuracy_results, pkl)\n\n with open('train_loss_results.pkl', 'wb') as pkl:\n pickle.dump(train_loss_results, pkl)\n\n with open('validation_accuracy_results.pkl', 'wb') as pkl:\n pickle.dump(validation_accuracy_results, pkl)\n\n train_plot(train_loss_results, train_accuracy_results)\n\n\n# validation_plot(validation_accuracy_results)\n\nif __name__ == \"__main__\":\n model = entity_main_model(\"EntityModel\", 20, 768, 768, 80, 0)\n train(model, 0.001, 1, './ModelVariables', 64)\n","repo_name":"ShenakhtPajouh/transposition-simple","sub_path":"entity_network/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11838875102","text":"import re\nimport sys\nimport os\nimport os.path\n\ndef removeMinusOneFromConllFile(lines):\n \"\"\"\n Function to remove the \"-1\" in the gov column of conll Files\n \"\"\"\n i=0\n while i avail:\n # explore right branch only\n result = MaxProfit(toConsider[1:], avail)\n else:\n nextItem = toConsider[0]\n # explore left branch\n withVal, withToTake = MaxProfit(toConsider[1:], avail - nextItem.get_credit_line())\n withVal += nextItem.getNopat()\n # explore right branch\n withoutVal, withoutToTake = MaxProfit(toConsider[1:], avail)\n # Choose better branch\n if withVal > withoutVal:\n result = (withVal, withToTake + (nextItem,))\n else:\n result = (withoutVal, withoutToTake)\n\n return result\n\n\ndef testMaxProfit(Matrix, credit_left):\n print('Use search tree to make choice within a limit of', credit_left, 'credit')\n profit, chosen = MaxProfit(Matrix, credit_left)\n profit2 = profit + start_Nopat\n print('Total Nopat after making all the choices = ', profit2)\n for item in chosen:\n print(' ', item)\n","repo_name":"natalienie/SNC-Financial-Simulation-","sub_path":"SNC_simulation.py","file_name":"SNC_simulation.py","file_ext":"py","file_size_in_byte":7237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41768062496","text":"#!/usr/bin/env python\n\nfrom pylab import *\n\nfigure(figsize=(8,5), dpi=80)\nsubplot(111)\n\nX = np.linspace(-np.pi, 1.2*np.pi, 256,endpoint=True)\nC = 1 - 
np.cos(X-np.pi/8)\nS = 1 - np.cos( 3 * (X-np.pi/8))\n\nplot(X, C, color=\"blue\", linewidth=1.5, linestyle=\"--\", label=r\"$V_1 = 1-\\cos(\\phi-\\phi_0)$\")\nplot(X, S, color=\"red\", linewidth=1.5, linestyle=\"--\", label=r\"$V_3 = 1-\\cos 3(\\phi-\\phi_0)$\")\nplot(X, C+0.5*S, color=\"green\", linewidth=2.5, linestyle=\"-\", label=r\"$V_1 + 0.5 \\times V_3$\")\n\nax = gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data',0))\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',0))\n\nxlim(X.min()*1.1, X.max()*1.1)\nxticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi, np.pi/8],\n [r'$-\\pi$', r'$-\\pi/2$', r'$0$', r'$+\\pi/2$', r'$+\\pi$', r'$\\phi_0$'])\n\nylim(-0.8,5)\nyticks([+2, +1, +3],\n [r'$+2$', r'$+1$', r'$+3$'])\n\nt = np.pi/8\nplot([t,t],[0,3.2],\n color ='green', linewidth=1., linestyle=\"--\")\n\nlegend(loc='upper left')\n\nsavefig(\"dihedral.svg\", dpi=72)\nshow()\n","repo_name":"noinil/pinang","sub_path":"share/figures/energy_function/dihedral.py","file_name":"dihedral.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"28520331661","text":"# survey/views.py\n\nfrom io import BytesIO\nimport io\nimport json\nimport os\nfrom django.conf import settings\nfrom django.contrib.auth import login\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.shortcuts import render, redirect, reverse, get_object_or_404, render\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.encoding import force_bytes, force_str\nfrom django.contrib import messages\nfrom datetime import datetime\n\nfrom django.views import View\nimport openpyxl\nfrom openpyxl.utils import get_column_letter\n\nfrom .models import Survey, Question, Choice, SurveyResponse\nfrom .tokens import user_tokenizer\nfrom .forms import RegistrationForm, SurveyResponseForm\nfrom django.contrib.auth import get_user_model\n\nimport io\nfrom django.http import HttpResponse\nfrom reportlab.graphics.shapes import Drawing\nfrom reportlab.graphics.charts.piecharts import Pie\nfrom reportlab.graphics import renderPDF\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib import colors\nfrom reportlab.platypus import Table, TableStyle\n\ndef generate_report(survey):\n response_data = []\n \n for question in survey.questions.all():\n if not question.istext:\n choices = question.choices.all()\n total_responses = question.question_responses.count()\n question_data = []\n \n for choice in choices:\n num_responses = choice.choices_selected.count()\n if total_responses == 0:\n percentage = 0\n else:\n percentage = num_responses / total_responses * 100\n \n question_data.append([choice.text, percentage])\n \n response_data.append([question.text, question_data])\n \n buffer = io.BytesIO()\n c = canvas.Canvas(buffer, pagesize=letter)\n \n i=0\n for question_data in response_data:\n question_text = question_data[0]\n choice_data = question_data[1]\n \n drawing = Drawing(400, 400)\n pie = Pie()\n pie.x = 100\n 
pie.y = 250-i\n pie.width = 150\n pie.height = 150\n \n data = []\n labels = []\n for choice in choice_data:\n data.append(choice[1])\n labels.append(choice[0])\n \n pie.data = data\n pie.labels = labels\n pie.slices.strokeWidth = 0.5\n pie.slices.popout = 5\n drawing.add(pie)\n\n # Create a custom legend with corresponding percentages\n legend_data = []\n for choice in choice_data:\n legend_data.append([choice[0], f\"{choice[1]:.2f}%\"])\n\n legend_table = Table(legend_data, colWidths=[100, 50])\n legend_table.setStyle(TableStyle([\n ('BACKGROUND', (0, 0), (-1, -1), colors.white),\n ('TEXTCOLOR', (0, 0), (-1, -1), colors.black),\n ('ALIGN', (1, 0), (-1, -1), 'RIGHT'),\n ('FONTNAME', (0, 0), (-1, -1), 'Helvetica'),\n ('FONTSIZE', (0, 0), (-1, -1), 7),\n ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),\n ('BOX', (0, 0), (-1, -1), 0.25, colors.black),\n ]))\n legend_table.wrapOn(c, 100, 400)\n legend_table.drawOn(c, 350, 550-(3*i))\n\n c.drawString(50, 700-(2.5*i), question_text)\n renderPDF.draw(drawing, c, 50, 250-(2*i))\n i+=120\n if i > 121:\n i=0\n c.showPage()\n \n c.save()\n buffer.seek(0)\n \n response = HttpResponse(buffer, content_type='application/pdf')\n response['Content-Disposition'] = f'attachment; filename={survey.title}_Results.pdf'\n return response\n\n\nUser = get_user_model()\n\ndef update_surveys():\n now = datetime.now()\n expired_surveys = Survey.objects.filter(deadline__lt=now, expired=False)\n for survey in expired_surveys:\n user = survey.created_by\n points = survey.allocated_points\n survey.expired = True\n survey.allocated_points -= points\n if points > 0:\n user.points += points\n survey.save()\n user.save()\n\ndef home(request):\n return render(request, 'survey/home.html', {})\n\ndef SurveyListview(request):\n update_surveys()\n surveys = Survey.objects.filter(expired=False)\n user_responses = SurveyResponse.objects.filter(created_by=request.user)\n answered_surveys = [response.question.survey for response in user_responses]\n unanswered_surveys = surveys.exclude(\n id__in=[survey.id for survey in answered_surveys if survey]\n ).exclude(\n allocated_points=0\n ) \n sorted_surveys = sorted(unanswered_surveys, key=lambda survey: survey.priorityValue(), reverse=True)\n context = {'surveys': sorted_surveys}\n return render(request, 'survey/list_surveys.html', context)\n\ndef survey_submitted(request):\n return render(request, 'survey/survey_submitted.html')\n\ndef survey_expired(request):\n return render(request, 'survey/survey_expired.html')\n\ndef fill_survey(request, survey_id):\n survey = get_object_or_404(Survey, id=survey_id)\n if survey.expired == True:\n return HttpResponseRedirect(reverse('survey_expired'))\n questions = survey.questions.all()\n form = SurveyResponseForm(questions=questions)\n\n if request.method == 'POST':\n form = SurveyResponseForm(request.POST, questions=questions)\n if form.is_valid():\n for question, choice_id in form.cleaned_data.items():\n if question.startswith('question_'):\n question_id = question.replace('question_', '')\n question = Question.objects.get(id=question_id)\n if question.istext:\n choice = Choice.objects.create(text=choice_id, question_id=question_id)\n else: \n choice = Choice.objects.get(id=choice_id)\n if request.user.is_authenticated:\n user = request.user\n if request.POST.get('anonymous') == 'True':\n SurveyResponse.objects.create(is_anonymous=True, question_id=question_id, choice=choice, created_by=user)\n else:\n SurveyResponse.objects.create(question_id=question_id, choice=choice, created_by=user) \n 
else:\n SurveyResponse.objects.create(question_id=question_id, choice=choice) \n if request.user.is_authenticated: \n user.points += 1\n if request.POST.get(\"group\") != \"\":\n user.group_name = request.POST.get(\"group\")\n user.save()\n survey.allocated_points -= 1 \n survey.save()\n return HttpResponseRedirect(reverse('survey_submitted'))\n\n context = {\n 'survey': survey,\n 'form': form,\n }\n return render(request, 'survey/fill_survey.html', context)\n\nclass SharePointsView(View):\n\n def post(self, request):\n \n context = {}\n username = request.POST.get('username')\n points = int(request.POST.get('points'))\n \n try:\n receiver = User.objects.get(username=username)\n except User.DoesNotExist:\n messages.error(request, 'User does not exist')\n context['username_error'] = 'User does not exist'\n return render(request, 'survey/share_points.html', context)\n \n sender = request.user\n if points < 1 or points > sender.points:\n messages.error(request, 'Invalid points value')\n context['points_error'] = 'Invalid points value'\n return render(request, 'survey/share_points.html', context)\n \n sender.points -= points\n receiver.points += points\n sender.save()\n receiver.save()\n \n messages.success(request, f'Successfully shared {points} points with {receiver.username}')\n context['success'] = f'Successfully shared {points} points with {receiver.username}'\n return render(request, 'survey/profile.html', context)\n\n def get(self, request):\n\n context = {}\n return render(request, 'survey/share_points.html', context)\n\nclass RegisterView(View):\n def get(self, request):\n return render(request, 'survey/register.html', { 'form': RegistrationForm() })\n\n def post(self, request):\n form = RegistrationForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_valid = False\n user.save()\n #token = user_tokenizer.make_token(user)\n #user_id = urlsafe_base64_encode(force_bytes(str(user.id)))\n #url = 'http://localhost:8000' + reverse('confirm_email', kwargs={'user_id': user_id, 'token': token})\n #message = get_template('survey/register_email.html').render({'confirm_url': url})\n #mail = EmailMessage('SurFill Email Confirmation', message, to=[user.email], from_email=settings.EMAIL_HOST_USER)\n #mail.content_subtype = 'html'\n #mail.send()\n\n return render(request, 'survey/login.html', {\n 'form': AuthenticationForm(),\n 'message': f'Welcome to SurFill {user.username}. 
You can now login'\n            })\n\n        return render(request, 'survey/register.html', { 'form': form })\n\n# this view class was replaced by django.contrib.auth.views.LoginView\nclass LoginView(View):\n    def get(self, request):\n        return render(request, 'survey/login.html', { 'form': AuthenticationForm() })\n\n    def post(self, request):\n        form = AuthenticationForm(request, data=request.POST)\n        if form.is_valid():\n            # get remember me data (AuthenticationForm has no remember_me field and cleaned_data only exists after validation, so read it from the raw POST data)\n            remember_me = request.POST.get('remember_me')\n            if not remember_me:\n                self.request.session.set_expiry(0)  # if remember me is not checked, expire the session when the browser closes\n                self.request.session.modified = True\n            try:\n                form.clean()\n            except ValidationError:\n                return render(\n                    request,\n                    'survey/login.html',\n                    { 'form': form, 'invalid_creds': True }\n                )\n\n            login(request, form.get_user())\n\n            return redirect(reverse('profile'))\n\n        return render(request, 'survey/login.html', { 'form': form })\n\n\nclass ProfileView(LoginRequiredMixin, View):\n\n    def get(self, request):\n        surveys = Survey.objects.filter(created_by=request.user)\n\n        context = {'surveys': surveys}\n        update_surveys()\n        return render(request, 'survey/profile.html', context)\n\n\nclass SurveyEditView(LoginRequiredMixin, View):\n    \n    def get(self, request, survey_id):\n        survey = get_object_or_404(Survey, id=survey_id)\n        context = {'survey': survey}\n        return render(request, 'survey/edit_survey.html', context)\n\n    def post(self, request, survey_id):\n\n        oldsurvey = get_object_or_404(Survey, pk=survey_id)\n        data = request.POST\n        print(data.get('anonymous'))\n        \n        title = data.get('title')\n        questions_json = data.getlist('questions')\n        deadline = data.get('deadline')\n        allocated_points = data.get('points')\n\n        if allocated_points == '':\n            allocated_points = 0\n\n        valid = True\n        context = {'survey': oldsurvey}\n\n        if not title:\n            valid = False\n            context['title_error'] = 'title is required'\n\n        if not deadline:\n            valid = False\n            context['deadline_error'] = 'deadline is required' \n\n        if not questions_json:\n            valid = False\n            context['questions_error'] = 'questions are required'\n        \n        if not valid:\n            context['users'] = User.objects.all()\n            return render(request, 'survey/edit_survey.html', context)\n\n        if request.POST.get('anonymous') == 'True':\n            survey = Survey.objects.create(responder_info_required= False, title=title, created_by=request.user, created_at = oldsurvey.created_at,\n            deadline = deadline, allocated_points=(int(allocated_points) + oldsurvey.allocated_points))\n            oldsurvey.delete()\n\n        else:\n            survey = Survey.objects.create(title=title, created_by=request.user, created_at = oldsurvey.created_at,\n            deadline = deadline, allocated_points=(int(allocated_points) + oldsurvey.allocated_points))\n            oldsurvey.delete() \n\n        request.user.points -= int(allocated_points)\n        request.user.save() \n\n        for question_json in questions_json:\n            question_data = json.loads(question_json)\n            question = Question.objects.create(text=question_data['text'], survey=survey)\n            if not question_data['choices']:\n                question.istext = True\n                question.save()\n            else:\n                for choice_data in question_data['choices']:\n                    Choice.objects.create(text=choice_data['text'], question=question)\n\n\n        # Show success message and redirect to survey list\n        messages.success(request, 'Survey updated successfully!')\n        return redirect('profile')\n\n\nclass SurveyCreateView(LoginRequiredMixin, View):\n    def get(self, request):\n        users = User.objects.all()\n        return render(request, 'survey/create_survey.html', {'users': users})\n    \n    def post(self, request):\n        data = request.POST\n        \n        title = 
data.get('title')\n questions_json = data.getlist('questions')\n deadline = data.get('deadline')\n allocated_points = data.get('points')\n if allocated_points == '':\n allocated_points = 0\n valid = True\n context = {}\n if not title:\n valid = False\n context['title_error'] = 'title is required'\n\n if not deadline:\n valid = False\n context['deadline_error'] = 'deadline is required' \n\n if not questions_json:\n valid = False\n context['questions_error'] = 'questions are required'\n \n if not valid:\n context['users'] = User.objects.all()\n return render(request, 'survey/create_survey.html', context)\n\n if request.POST.get('anonymous') == 'True':\n survey = Survey.objects.create(responder_info_required= False, title=title, created_by=request.user, \n deadline = deadline, allocated_points=allocated_points)\n else:\n survey = Survey.objects.create(title=title, created_by=request.user, \n deadline = deadline, allocated_points=allocated_points)\n\n request.user.points -= int(allocated_points)\n request.user.save() \n for question_json in questions_json:\n question_data = json.loads(question_json)\n question = Question.objects.create(text=question_data['text'], survey=survey)\n if not question_data['choices']:\n question.istext = True\n question.save()\n else:\n for choice_data in question_data['choices']:\n Choice.objects.create(text=choice_data['text'], question=question)\n\n return redirect(reverse('profile'))\n\n\nclass QuestionViewModel:\n def __init__(self, text):\n self.text = text\n self.choices = []\n\n def add_survey_response(self, survey_response):\n for choice in self.choices:\n if choice.id == survey_response.choice.id:\n choice.responses += 1\n break\n\n\nclass ChoiceResultViewModel:\n def __init__(self, id, text, responses=0):\n self.id = id\n self.text = text\n self.responses = responses\n\n\nclass SurveyResultsView(View):\n\n def get_object(self):\n self.obj = get_object_or_404(Survey, pk=self.kwargs['survey_id'])\n return self.obj\n\n def get(self, request, survey_id):\n self.obj = get_object_or_404(Survey, pk=self.kwargs['survey_id'])\n questions = []\n for question in self.obj.questions.all():\n question_vm = QuestionViewModel(question.text)\n for choice in question.choices.all():\n question_vm.choices.append(ChoiceResultViewModel(choice.id, choice.text))\n \n for survey_response in SurveyResponse.objects.filter(question=question):\n question_vm.add_survey_response(survey_response)\n \n questions.append(question_vm)\n\n context = {'survey': self.obj, 'questions': questions}\n \n return render(request, 'survey/survey_results.html', context)\n\n def post(self, request, survey_id):\n if 'downloadpdf' in request.POST:\n self.obj = get_object_or_404(Survey, pk=self.kwargs['survey_id'])\n return generate_report(self.obj)\n\n if 'download' in request.POST:\n self.obj = get_object_or_404(Survey, pk=self.kwargs['survey_id'])\n questions = []\n for question in self.obj.questions.all():\n question_vm = QuestionViewModel(question.text)\n for choice in question.choices.all():\n question_vm.choices.append(ChoiceResultViewModel(choice.id, choice.text))\n \n for survey_response in SurveyResponse.objects.filter(question=question):\n question_vm.add_survey_response(survey_response)\n \n questions.append(question_vm)\n\n for question in questions:\n print(question.text)\n for choice in question.choices:\n print(choice.text, choice.responses)\n print()\n return export_survey_results(self.obj, questions)\n\n if 'delete' in request.POST:\n survey = self.get_object()\n if 
survey.allocated_points > 0:\n request.user.points += survey.allocated_points\n request.user.save()\n survey = Survey.objects.get(id=survey_id)\n survey.delete()\n return redirect('profile')\n survey = get_object_or_404(Survey, id=survey_id)\n points = int(request.POST.get('points', 0))\n if points > 0:\n survey.allocated_points += points\n survey.save()\n request.user.points -= int(points)\n request.user.save()\n return redirect('profile') \n elif points != 0:\n survey.allocated_points -= abs(points)\n survey.save()\n request.user.points += abs(int(points))\n request.user.save()\n return redirect('profile')\n return redirect('survey_results', survey_id=survey_id) \n\n\nclass TestEmail(View):\n def get(self, request):\n user = User.objects.get(pk=9)\n token = user_tokenizer.make_token(user)\n user_id = urlsafe_base64_encode(force_bytes(user.id))\n url = 'http://localhost:8000' + reverse('confirm_email', kwargs={'user_id': user_id, 'token': token})\n message = get_template('survey/register_email.html').render({\n 'confirm_url': url\n })\n mail = EmailMessage('Django Survey Email Confirmation', message, to=[user.email], from_email=settings.EMAIL_HOST_USER)\n mail.content_subtype = 'html'\n mail.send()\n return HttpResponse(f'email sent user_id = {user_id}, token = {token}')\n\n\nclass ConfirmRegistrationView(View):\n def get(self, request, user_id, token):\n user_id = force_str(urlsafe_base64_decode(force_bytes(user_id)))\n \n user = User.objects.get(pk=user_id)\n\n context = {\n 'form': AuthenticationForm(),\n 'message': 'Registration confirmation error . Please click the reset password to generate a new confirmation email.'\n }\n if user and user_tokenizer.check_token(user, token):\n user.is_valid = True\n user.save()\n context['message'] = 'Registration complete. 
Please login'\n\n return render(request, 'survey/login.html', context)\n\ndef export_survey_results(survey, questions):\n wb = openpyxl.Workbook()\n \n # iterate over each question\n for question in survey.questions.all():\n # create a new sheet with the question text as the title\n sheet = wb.create_sheet(title=str(question.text).replace(\"?\", \"\"))\n \n # add the table headers\n sheet['A1'] = 'Answer'\n sheet['B1'] = 'Email'\n sheet['C1'] = 'Username'\n sheet['D1'] = 'Group'\n \n \n # iterate over each response and add it to the sheet\n for index, response in enumerate(question.question_responses.all()):\n sheet.cell(row=index+2, column=1, value=response.choice.text)\n if (response.is_anonymous != True) and (response.created_by is not None):\n sheet.cell(row=index+2, column=2, value=(response.created_by.get_email()))\n sheet.cell(row=index+2, column=3, value=(response.created_by.get_username()))\n sheet.cell(row=index+2, column=4, value=(response.created_by.get_group()))\n else:\n sheet.cell(row=index+2, column=2, value=(\"Anonymous\"))\n sheet.cell(row=index+2, column=3, value=(\"Anonymous\"))\n sheet.cell(row=index+2, column=4, value=(\"Anonymous\"))\n \n\n sheet.column_dimensions[get_column_letter(1)].width = 50\n sheet.column_dimensions[get_column_letter(2)].width = 30\n sheet.column_dimensions[get_column_letter(3)].width = 30\n sheet.column_dimensions[get_column_letter(4)].width = 30\n \n \n first_sheet = wb.sheetnames[0]\n wb.remove(wb[first_sheet])\n\n # create a response object with the Excel file\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n title = survey.title + \"_Results.xlsx\"\n response['Content-Disposition'] = f'attachment; filename=\"{title}\"'\n wb.save(response)\n return response\n","repo_name":"yassineDr12/SurFill","sub_path":"survey/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8670673795","text":"from __future__ import division\nclass PID:\n\n def __init__(self, p_gain, i_gain, d_gain):\n self.last_error = 0.0\n self.p_gain = p_gain\n self.i_gain = i_gain\n self.d_gain = d_gain\n self.i_error = 0.0\n self.max = 5\n\n def updatePID(self, p_gain, i_gain, d_gain):\n self.p_gain = p_gain\n self.i_gain = i_gain\n self.d_gain = d_gain\n\n\n def Compute(self, input, target, dt, err = 0):\n\n error = target - input\n if err != 0:\n error = err\n\n p_error = error\n\n\n self.i_error += self.last_error * dt\n i_error = self.i_error\n\n\n d_error = (error - self.last_error) / dt\n\n\n p_output = self.p_gain * p_error\n i_output = self.i_gain * i_error\n d_output = self.d_gain * d_error\n\n\n self.last_error = error\n out = p_output+ i_output+ d_output\n if out > self.max:\n out=self.max\n if out < (-1 * self.max):\n out = (-1 * self.max)\n return int(out)\n","repo_name":"FighterBay/Multi-Agent-Simulator","sub_path":"PID.py","file_name":"PID.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72929219786","text":"from typing import List\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n res = 0\n prev = prices[0]\n\n for price in prices[1:]:\n if price > prev:\n res += price - prev\n prev = price\n\n return res\n\nsol = Solution()\nprint(sol.maxProfit([7,1,5,3,6,4]))\n\n\"\"\"\nleetcode 122\n그리디 
알고리즘\n\"\"\"\n\n","repo_name":"boorooksus/Algorithm-Study","sub_path":"LeetCode/1회차/B78_BestTimeToBuyAndSellStock(2).py","file_name":"B78_BestTimeToBuyAndSellStock(2).py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74235985865","text":"import os\nos.add_dll_directory(\"C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2/bin\")\nimport tensorflow as tf\nfrom PIL import Image\n\n\n\n\npath = \"C:/Users/gmocc/Documents/GitHub/ProgettoDigitali/provaPython/faceRecognition/datasets/face_dataset_train_images/prova/\"\nfiles = os.listdir(path)\nfor filename in files:\n print(\"esamino \"+ str(filename))\n im = Image.open(path + filename)\n im.save(path + filename + \".png\")\n","repo_name":"giorgio-mocci/FaceRecognition","sub_path":"convertiImg.py","file_name":"convertiImg.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33522082776","text":"# datahandler.py\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n'''\nThis file is used for data preprocessing and data preparation for forecasting.\n\n'''\n\ndef load_data(file_name):\n \"\"\"Import dataframe.\"\"\"\n return pd.read_csv(file_name)\n\ndef info():\n \"\"\"To see the dataframe's basic information.\"\"\"\n Xero = load_data('Data.csv')\n Xero['Date'] = pd.to_datetime(Xero['Date'])\n Xero.info()\n Xero.Net_Income.describe()\n\n plt.figure(figsize = (10,5))\n print('skew: ', Xero.Net_Income.skew())\n sns.distplot(Xero['Net_Income'])\n # plt.show()\n \n\ndef corr():\n \"\"\"Data preparation for forecasting.\"\"\"\n Xero = load_data('Data.csv')\n corrMat = Xero.corr()\n mask = np.array(corrMat)\n mask[np.tril_indices_from(mask)] = False\n plt.subplots(figsize=(20,10))\n sns.heatmap(corrMat, mask=mask, vmax=0.8, square=True, annot=True)\n # plt.show()\n\n print(corrMat['Net_Income'].sort_values(ascending=False))\n\n dic = corrMat['Net_Income'].to_dict()\n lis = []\n for i in dic.values():\n if abs(i) < 0.4:\n lis.append(i)\n for j in dic.items():\n for k in lis:\n if j[1] == k:\n del Xero[j[0]]\n Xero['Date'] = pd.to_datetime(Xero['Date'])\n Xero.to_csv('Features.csv', index=False)\n","repo_name":"ZhudongQiu/Xero","sub_path":"mylib/datahandler.py","file_name":"datahandler.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6730279876","text":"import json\n\nclass Meniu:\n\n #meniu = \"Login : \"\n invalid = \"Invalid login\"\n\n def login(self):\n #deschide angajatii si cauta daca e userul in lista dupa id_unic. 
Daca e returneaza o lista cu un singur user\n file_angajatii = open(\"angajatii.json\",\"r\")\n angajatii = json.load(file_angajatii)\n file_angajatii.close()\n\n lista = []\n id = input(\"Login ID : \")\n for j in angajatii:\n lista.append(angajatii[j][2])\n\n if id not in lista:\n print(self.invalid)\n else:\n for i in angajatii:\n if id in angajatii[i]:\n return angajatii[i]","repo_name":"MariaBaghiu/Fabrica","sub_path":"meniu.py","file_name":"meniu.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26312118821","text":"#!/usr/bin/env python\n#coding=utf-8\n\nimport time\nfrom six.moves import configparser\nimport numpy as np\n\nfrom ._core import *\n\ndef run_config(f_config):\n section_common = 'FILES'\n section_option = 'GMMREG_OPT'\n\n c = configparser.ConfigParser()\n c.read(f_config)\n model_file = c.get(section_common, 'model')\n scene_file = c.get(section_common, 'scene')\n model = np.loadtxt(model_file)\n scene = np.loadtxt(scene_file)\n try:\n ctrl_pts_file = c.get(section_common, 'ctrl_pts')\n ctrl_pts = np.loadtxt(ctrl_pts_file)\n except:\n ctrl_pts = model\n level = int(c.get(section_option, 'level'))\n option_str = c.get(section_option, 'sigma')\n scales = [float(s) for s in option_str.split(' ')]\n option_str = c.get(section_option, 'lambda')\n lambdas = [float(s) for s in option_str.split(' ')]\n\n option_str = c.get(section_option, 'max_function_evals')\n iters = [int(s) for s in option_str.split(' ')]\n\n normalize_flag = int(c.get(section_option, 'normalize'))\n if normalize_flag==1:\n [model, c_m, s_m] = normalize(model)\n [scene, c_s, s_s] = normalize(scene)\n [ctrl_pts, c_c, s_c] = normalize(ctrl_pts)\n t1 = time.time()\n after_tps = run_multi_level(model,scene,ctrl_pts,level,scales,lambdas,iters)\n if normalize_flag==1:\n model = denormalize(model,c_m,s_m)\n scene = denormalize(scene,c_s,s_s)\n after_tps = denormalize(after_tps,c_s,s_s)\n t2 = time.time()\n print(\"Elasped time: {} seconds\".format(t2-t1))\n return model, scene, after_tps\n","repo_name":"bing-jian/gmmreg-python","sub_path":"src/_run_config.py","file_name":"_run_config.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"81"} +{"seq_id":"73126371785","text":"from typing import List\r\n\r\n\r\nclass Solution:\r\n def floodFill(self, image: List[List[int]], sr: int, sc: int, color: int) -> List[List[int]]:\r\n oldcolor =image[sr][sc]\r\n def changecolor(sr,sc):\r\n \r\n if 0<=sr ArgumentParser:\n \"\"\"\n Return a parser that contains the --overwrite, --dry-run, and --verbose\n flags for use in generator scripts.\n \"\"\"\n parser = ArgumentParser(add_help=False)\n parser.add_argument(\"--overwrite\", action=argparse.BooleanOptionalAction)\n parser.add_argument(\"--dry-run\", action=\"store_true\")\n parser.add_argument(\"--verbose\", action=\"store_true\")\n return parser\n\n\ndef add_wandb_options(parser: ArgumentParser) -> ArgumentParser:\n \"\"\"\n Add flags to the given parser for controlling runs with wandb.\n\n Flags added are:\n\n --dev | -D Set wandb to offine mode, allowing testing code without\n uploading results to wandb.\n\n --disable-wandb Disable wandb.\n \"\"\"\n\n parser.add_argument(\n \"-D\",\n \"--dev\",\n dest=\"mode\",\n action=\"store_const\",\n const=\"offline\",\n help=(\n \"run in development mode. 
This disables syncing\"\n \" of wandb runs, but still shows run metrics in\"\n \" the console\"\n ),\n )\n\n parser.add_argument(\n \"--disable-wandb\",\n dest=\"mode\",\n action=\"store_const\",\n const=\"disabled\",\n help=\"disable wandb.\",\n )\n\n return parser\n\n\ndef print_title(text: str) -> None:\n \"\"\"\n Print a title.\n\n Title text\n ==========\n\n \"\"\"\n\n print(f\"{text}\\n{''.join(['=' for _ in text])}\")\n","repo_name":"niklasdewally/graph-transfer-learning","sub_path":"src/gtl/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28091976064","text":"from days import parser\nfrom days.utilities import timer\n\nfrom queue import PriorityQueue\n\n\nclass Graph:\n neighbors = [(0, -1), (0, 1), (-1, 0), (1, 0)]\n\n @timer\n def __init__(self, data):\n self.max_x = len(data[0])\n self.max_y = len(data)\n self.vertices = [(i, j) for j in range(self.max_y) for i in range(self.max_x)]\n self.edges = self.get_edges(data)\n\n def get_edges(self, data):\n edges = {v: [] for v in self.vertices}\n for e in edges:\n for n in self.neighbors:\n cx = e[0] + n[0]\n cy = e[1] + n[1]\n if 0 <= cx < self.max_x and 0 <= cy < self.max_y:\n edges[e].append(((cx, cy), data[cy][cx]))\n return edges\n\n\n@timer\ndef dijkstra(graph, start_vertex):\n distances = {v: float('inf') for v in graph.vertices}\n distances[start_vertex] = 0\n\n visited = set()\n\n pq = PriorityQueue()\n pq.put((0, start_vertex))\n\n while not pq.empty():\n (dist, current_vertex) = pq.get()\n visited.add(current_vertex)\n neighbors = graph.edges[current_vertex]\n\n for neighbor, distance in neighbors:\n if neighbor not in visited:\n old_cost = distances[neighbor]\n new_cost = distances[current_vertex] + distance\n if new_cost < old_cost:\n pq.put((new_cost, neighbor))\n distances[neighbor] = new_cost\n\n return distances\n\n\ndef upscale_data(data):\n part_upscaled = []\n for d_row in data:\n part_upscaled.append([((d+i-1) % 9) + 1 for i in range(5) for d in d_row])\n upscaled = []\n for i in range(5):\n for d_row in part_upscaled:\n upscaled.append([((d + i - 1) % 9) + 1 for d in d_row])\n return upscaled\n\n\n@timer\ndef part01(data):\n graph = Graph(data)\n lengths = dijkstra(graph, (0, 0))\n return lengths[(graph.max_x - 1, graph.max_y - 1)]\n\n\n@timer\ndef part02(data):\n new_data = upscale_data(data)\n graph = Graph(new_data)\n lengths = dijkstra(graph, (0, 0))\n return lengths[(graph.max_x - 1, graph.max_y - 1)]\n\n\n@timer\ndef load_data():\n return parser.load_rows_to_list(\"day15.txt\", lambda line: [int(x) for x in line.strip()])\n\n\n@timer\ndef main():\n data = load_data()\n print(part01(data))\n print(part02(data))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"filakrad/adventOfCode","sub_path":"2021/day15/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"5735044437","text":"# -*- coding:utf8 -*-\r\n\r\nimport os\r\n\r\nclass BatchRename():\r\n def __init__(self):\r\n self.path = 'data/all/' #表示需要命名处理的文件夹目录,复制地址后注意反斜杠\r\n\r\n def rename(self):\r\n filelist = os.listdir(self.path) #获取文件路径\r\n total_num = len(filelist) #获取文件长度(文件夹下图片个数)\r\n i = 0 #表示文件的命名是从1开始的\r\n for item in filelist:\r\n if item.endswith('.jpg') or item.endswith('.png'): #初始的图片的格式为jpg格式的(或者源文件是png格式及其他格式,后面的转换格式就可以调整为自己需要的格式即可,我习惯转成.jpg)\r\n src = 
os.path.join(os.path.abspath(self.path), item)\r\n dst = os.path.join(os.path.abspath(self.path), 'all' + format(str(i), '0>4s') + '.jpg')#处理后的格式也为jpg格式的,当然这里可以改成png格式\r\n # 这种情况下的命名格式为000xxxx.jpg形式,可以自主定义想要的格式\r\n try:\r\n os.rename(src, dst)\r\n print ('converting %s to %s ...' % (src, dst))\r\n i = i + 1\r\n except:\r\n continue\r\n print ('total %d to rename & converted %d jpgs' % (total_num, i))\r\n\r\nif __name__ == '__main__':\r\n demo = BatchRename()\r\n demo.rename()\r\n ","repo_name":"guyuehome/guyueclass","sub_path":"planning&perception/yolov5_mobile_robot/baidu_picture_downloader/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"zh","doc_type":"code","stars":534,"dataset":"github-code","pt":"81"} +{"seq_id":"20012843595","text":"if __name__ == '__main__':\n dict = {}\n for _ in range(int(input())):\n name = input()\n score = float(input())\n if score in dict:\n dict[score].append(name)\n else:\n dict[score] = [name]\n list = []\n for i in dict:\n list.append([i, dict[i]])\n list.sort()\n result = list[1][1]\n result.sort()\n print(*result, sep='\\n')","repo_name":"FoRGoG/HackerRank","sub_path":"Python/02. basic_data_types/03. nested lists.py","file_name":"03. nested lists.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3974810741","text":"def counting_problems(n, m):\r\n a = [i for i in range(1,n+1)]\r\n for x in a:\r\n for i in range(1, n-2):\r\n if i != x:\r\n if x in range(1,m+1):\r\n new_arr = [x, i , x]\r\n \r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n t =int(input('T:'))\r\n for tt in range(t):\r\n n,m = input('N,M:').strip().split()\r\n print(n, m)\r\n print(counting_problems(int(n), int(m)))\r\n","repo_name":"AnshumanSinghh/Desktop-AI-Assistant","sub_path":"Desktop Assistant/hacker _earth.py","file_name":"hacker _earth.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"24657354474","text":"from __future__ import division, with_statement\nimport multiprocessing\n\nmax_procs = 1\n_used_procs = multiprocessing.Value('i', 1)\n_plock = multiprocessing.Lock()\n\ndef set_max_processors(value=None):\n '''\n set_max_processors(value=None)\n\n Set the maximum number of processors to ``value`` (or to the number of\n physical CPUs if ``None``).\n\n Note that this is valid for the current process and its children, but not\n the parent.\n\n Parameters\n ----------\n value : int, optional\n Number of processors to use. 
Defaults to number of CPUs (as returned by\n        ``multiprocessing.cpu_count()``).\n    '''\n    global max_procs\n    if value is None:\n        value = multiprocessing.cpu_count()\n    max_procs = value\n\ndef get_proc():\n    '''\n    available = get_proc()\n\n    Reserve a processor\n\n    Returns\n    -------\n    available : bool\n        True if a processor is available\n    '''\n    with _plock:\n        if _used_procs.value >= max_procs:\n            return False\n        _used_procs.value += 1\n        return True\n\ndef release_proc():\n    '''\n    release_proc()\n\n    Returns a processor to the pool\n    '''\n    with _plock:\n        _used_procs.value -= 1\n\ndef release_procs(n, count_current=True):\n    '''\n    release_procs(n, count_current=True)\n\n    Returns ``n`` processors to the pool\n\n    Parameters\n    ----------\n    n : int\n        Number of processors to release\n    count_current : bool, optional\n        Whether the current processor is to be included in ``n`` (default: True)\n    '''\n    if count_current:\n        n -= 1\n    if n > 0:\n        with _plock:\n            _used_procs.value -= n\n\ndef get_procs(desired=None, use_current=True):\n    '''\n    n = get_procs(desired=None, use_current=True)\n\n    Get up to ``desired`` processors (use None for no maximum).\n\n    Parameters\n    ----------\n    desired : int, optional\n        Number of processors you wish. By default, there is no maximum\n    use_current: bool, optional\n        Whether to count the current processor, True by default.\n    '''\n    if desired is None:\n        desired = 1024 # This should last a few years\n    n = (1 if use_current else 0)\n    while n < desired:\n        if get_proc():\n            n += 1\n        else:\n            return n\n    return n\n\n","repo_name":"luispedro/milk","sub_path":"milk/utils/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":610,"dataset":"github-code","pt":"81"}{"seq_id":"37240765769","text":"from tkinter import *\nimport sys, re\nfrom tkinter import ttk\nimport tkinter as tki\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import letter\n\nrootR = Tk()\nrootR.state(\"zoomed\")\nrootR.geometry(\"900x600\")\nrootR.title(\"Inventario general\")\nrootR.iconbitmap('med.ico')\nrootR.config(cursor=\"hand2\",bg=\"#f0f0f0\")\n\nconfiguracion_tittle = Frame(rootR, width=900, height=40,bg=\"#650090\",bd=3, relief=\"groove\").pack(fill='x')\n\n\n\n#---------------------------------------------------------------------\nnavegacion_inferior = Frame(rootR, width=400, height= 35)\nnavegacion_inferior.pack(side=\"bottom\",fill='x')\nnavegacion_inferior.config(bg=\"#650090\", relief=\"groove\")\n\n\nscrollbar = Scrollbar(rootR)\nscrollbar.pack(side=RIGHT, fill='y')\n\nlistbox = Text(rootR, yscrollcommand=scrollbar.set)\nfor i in range(10000):\n    listbox.insert(END, str(i))\nlistbox.pack(fill='both',expand=1)\nscrollbar.config(command=listbox.yview)\n\ndef imprimir():\n\tdocumento = listbox.get(\"1.0\", \"end\")\n\tc = canvas.Canvas(\"inventario_prueba.pdf\")\n\n\tc.drawString(120,760,\"Reporte de inventario Medicinae Apparatu Inventory\")\n\tc.drawString(120,740, documento)\n\tc.save()\n\n\natras = Button(navegacion_inferior, text=\"Atras\", relief=\"flat\")\natras.grid(row=0,column=0, padx=(30))\natras.config(bg=\"#650090\", fg=\"white\",font=(\"Courier New\",13))\n\nimprimir_informacion = Button(navegacion_inferior, text=\"Imprimir\", relief=\"flat\",command=imprimir)\nimprimir_informacion.grid(row=0,column=1, padx=(30))\nimprimir_informacion.config(bg=\"#650090\", fg=\"white\",font=(\"Courier 
New\",13))\n\nrootR.mainloop()\n","repo_name":"andetach1/Medicinae-Apparatu-Inventory","sub_path":"codigo/inventario.py","file_name":"inventario.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73188734345","text":"import pygame,random,math\n\nclass Ball(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.r = 7\n self.show = True\n self.start = False\n self.pos = [x,y]\n self.speed = [0,0]\n self.rect = pygame.draw.circle(screen,WHITE,self.pos,self.r)\n self.rect.center = self.pos\n\n def update(self):\n\n if self.show:\n self.key()\n else:\n self.move_to()\n self.touch_wall()\n self.rect = pygame.draw.circle(screen,WHITE,self.pos,self.r)\n self.speed[0] *= 0.9\n self.speed[1] *= 0.9\n self.pos[0] += int(self.speed[0])\n self.pos[1] += int(self.speed[1])\n\n def move_to(self):\n mouse_pos = pygame.mouse.get_pos()\n self.speed[0] = int((mouse_pos[0] - self.pos[0])/5)\n self.speed[1] = int((mouse_pos[1] - self.pos[1])/5)\n\n def key(self):\n\n if key[\"up\"]:\n self.speed[1] = -12\n\n if key[\"down\"]:\n self.speed[1] = 12\n\n if key[\"left\"]:\n self.speed[0] = -12\n\n if key[\"right\"]:\n self.speed[0] = 12\n\n def kill(self):\n for i in enemies:\n if i.show:\n collide_list = pygame.sprite.spritecollide(self, [i] , False)\n if collide_list != []:\n return True\n \n\n def touch_wall(self):\n if self.pos[0]+self.r >= 799:\n self.pos[0] = 799 - self.r\n\n if self.pos[0]-self.r <= 0:\n self.pos[0] = self.r\n\n if self.pos[1]+self.r >= 599: \n self.pos[1] = 599 - self.r\n\n if self.pos[1]-self.r <= 0:\n self.pos[1] = self.r\n\n\nclass enemy(pygame.sprite.Sprite):\n \n def __init__(self,x,y):\n pygame.sprite.Sprite.__init__(self)\n self.show = False\n self.start = False\n self.next_time = pygame.time.get_ticks() + random.randint(30,600)*100\n self.r = 10\n self.pos = [x,y]\n self.move_to_pos = [random.randint(0,800),random.randint(0,600)]\n if get_distance(self.move_to_pos,self.pos) < 30:\n self.__init__(random.randint(0,800),random.randint(0,600))\n self.speed = [0,0]\n self.rect = pygame.draw.circle(screen,RED,self.pos,self.r)\n self.rect.center = self.pos\n self.move_to()\n\n\n def update(self):\n self.kill()\n if self.show:\n self.rect = pygame.draw.circle(screen,RED,self.pos,self.r)\n self.pos[0] += self.speed[0]\n self.pos[1] += self.speed[1]\n\n elif pygame.time.get_ticks() >= self.next_time:\n self.show = True\n\n\n def move_to(self):\n # global ball\n self.speed[0] = int((self.move_to_pos[0] - self.pos[0])/30)\n self.speed[1] = int((self.move_to_pos[1] - self.pos[1])/30)\n\n def kill(self):\n kill = False\n if self.pos[0]+self.r >= 800 + 2 * self.r:\n # self.pos[0] = 799 - self.r\n kill = True\n\n if self.pos[0]-self.r <= -0 - 2 * self.r:\n # self.pos[0] = self.r\n kill = True\n\n if self.pos[1]+self.r >= 600 + 2 * self.r: \n # self.pos[1] = 599 - self.r\n kill = True\n\n if self.pos[1]-self.r <= -0 - 2 * self.r: \n # self.pos[1] = self.r\n kill = True\n \n if kill and self.show:\n global score\n self.__init__(self.pos[0],self.pos[1])\n score += 1\n\nclass Score(pygame.sprite.Sprite):\n global score\n def __init__(self, center_x, center_y, font, font_size):\n self.font = pygame.font.Font(font, font_size)\n self.image = self.font.render(str(0), 1, (255, 255, 255))\n self.pos = [center_x, center_y]\n self.rect = self.image.get_rect()\n self.rect.center = self.pos\n\n def update(self):\n self.image = self.font.render(str(score), 1, (255, 
255, 255))\n self.rect = self.image.get_rect()\n self.rect.center = self.pos\n screen.blit(self.image, self.rect)\n\ndef get_distance(pos1,pos2):\n return math.sqrt((pos1[0] - pos2[0])**2 + (pos1[1] - pos2[1])**2)\n\ndef darw_mouse():\n mouse_pos = pygame.mouse.get_pos()\n mouse_surface = pygame.transform.scale(mouse_img, (32, 32))\n mouse_rect = mouse_surface.get_rect()\n mouse_rect.center = mouse_pos\n screen.blit(mouse_surface,mouse_rect)\n\ndef restart():\n ball.show = True\n\nWHITE = (255,255,255)\nRED = (200,50,50)\nGREEN = (0,70,0)\nBLUE = (0,0,90)\nBLACK = (0,0,0)\n\npygame.init()\nsize = (800,600)\nscreen = pygame.display.set_mode(size)\ngroup = pygame.sprite.Group()\n\nfps = 30\npygame.display.set_caption(\"tank\",\"pgame\")\ndone = False\nnext_time = 0\nclock = pygame.time.Clock()\ngroup = pygame.sprite.Group()\n\nscore = 0\nhp = 10\nkey = {\"up\":False,\"down\":False,\"left\":False,\"right\":False}\nmouse_img = pygame.image.load('mouse.jpg')\nenemies = []\nball = Ball(400,300)\nscore_image = Score(30,30, 'freesansbold.ttf', 30)\n\n\nfor i in range(10):\n enemies.append(enemy(random.randint(0,800),random.randint(0,600)))\n group.add(enemies[i])\n\n\n# enemy = enemy(400,300)\ngroup.add(ball)\npygame.mouse.set_visible(False)\n\n\n\nwhile not done:\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n done = True\n\n if event.type == pygame.KEYDOWN:\n \n if event.key == pygame.K_LEFT:\n key[\"left\"] = True\n \n if event.key == pygame.K_RIGHT:\n key[\"right\"] = True\n \n if event.key == pygame.K_UP:\n key[\"up\"] = True\n \n if event.key == pygame.K_DOWN:\n key[\"down\"] = True\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n key[\"left\"] = False\n \n if event.key == pygame.K_RIGHT:\n key[\"right\"] = False\n \n if event.key == pygame.K_UP:\n key[\"up\"] = False\n \n if event.key == pygame.K_DOWN:\n key[\"down\"] = False\n if event.type == 1:\n restart()\n\n # print(pygame.time.get_ticks(),next_time)\n if pygame.time.get_ticks() >= next_time and len(enemies)<=50:\n enemies.append(enemy(random.randint(0,800),random.randint(0,600)))\n group.add(enemies[-1])\n\n\n screen.fill(BLACK)\n darw_mouse()\n for sprite in group:\n sprite.update()\n score_image.update()\n \n if not ball.show:\n screen.fill(BLACK)\n\n if ball.kill():\n # pass\n hp -= 1\n print(hp)\n if hp <= 0:\n ball.show = False\n\n\n pygame.display.flip()\n clock.tick(fps)\npygame.quit() ","repo_name":"jc533/games","sub_path":"tank3.py","file_name":"tank3.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74600518344","text":"import BPG\nimport yaml\nfrom BPG.lumerical.code_generator import LumericalMaterialGenerator\nfrom pathlib import Path\n\n\ndef test_example_lumerical_map():\n \"\"\"\n Runs the provided example lumerical materials specification through the LumericalMaterialGenerator class and\n checks that they are properly created\n \"\"\"\n\n # 1) load the lumerical map file from the examples dir into the\n filepath = 'example_tech/BPG_tech_files/lumerical_map.yaml'\n # If the path where we place the output does not exist, create it\n outpath = Path('gen_libs/bpg_test_suite/lsf_writer_tests')\n outpath.mkdir(exist_ok=True, parents=True)\n outpath = str(outpath / 'materials.lsf')\n\n with open(filepath, 'r') as f:\n lumerical_map = yaml.load(f, Loader=yaml.CFullLoader if yaml.__with_libyaml__ else yaml.FullLoader)\n\n # 2) Extract the custom materials under the materials key\n 
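# (in the example lumerical_map.yaml, custom materials are assumed to live under a top-level 'materials' key, which is what the lookup below relies on)\n    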
mat_map = lumerical_map['materials']\n\n    # 3) Create the LumericalMaterialGenerator class and load the data in\n    lmg = LumericalMaterialGenerator(outpath)\n    lmg.import_material_file(mat_map)\n    lmg.export_to_lsf()\n\n\nif __name__ == '__main__':\n    test_example_lumerical_map()\n","repo_name":"BerkeleyPhotonicsGenerator/BPG","sub_path":"tests/test_lumerical_material_generator.py","file_name":"test_lumerical_material_generator.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"81"}{"seq_id":"74471230345","text":"# file build system\n#\n# The purpose of this file is to load a system configuration\n# in the graphic data base\n#\n\nimport json\n\nimport redis\nimport pickle\nimport gzip\n\n\nclass Build_Configuration(object):\n    def __init__( self, redis_site ): \n        self.redis_handle = redis.StrictRedis( host = redis_site[\"host\"] , port=redis_site[\"port\"], db=redis_site[\"graph_db\"] , decode_responses=True)\n        self.delete_all()\n        \n\n\n    def delete_all(self): #tested\n        self.redis_handle.flushdb()\n        \n\n\n    def restore_extraction(self,filename):\n        file = gzip.GzipFile(filename, 'rb')\n        buffer = b\"\"\n        while True:\n            data = file.read()\n            if data == b\"\":\n                break\n            buffer += data\n        extract = pickle.loads(buffer)\n        file.close()\n        keys = extract.keys()\n        print(\"len\",len(keys))\n        for i,item in extract.items():\n            self.redis_handle.restore(name = i,ttl=0, value = item, replace = True)\n        \n\n\n\nif __name__ == \"__main__\" :\n\n    file_handle = open(\"system_data_files/redis_server.json\",'r')\n    data = file_handle.read()\n    file_handle.close()\n    redis_site = json.loads(data)\n\n\n    bc = Build_Configuration(redis_site)\n    \n    bc.restore_extraction(\"system_data_files/extraction_file.pickle\")\n\n    ","repo_name":"NanoDataCenter/nano_data_center","sub_path":"code/redis_support_py3/construct_graph_py3.py","file_name":"construct_graph_py3.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}{"seq_id":"18094249434","text":"#Stephanie Bravo\n#February 20, 2019\n#This program asks the user for the number of hours until the weekend. 
prints out days and leftover hours.\n\nhours = int(input(\"Enter number of hours:\"))\n\ndays = hours//24\n\nprint(\"Days:\", days)\n\nleftover = hours%24\n\nprint(\"Hours:\", leftover)\n","repo_name":"stephanieb00/CSCI127","sub_path":"Python Work/Assignment19.py","file_name":"Assignment19.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1942184870","text":"try:\n from utils import *\n from type_collector import Context, MapInfo, LabelInfo\n from assembly import Robot, Map, Action, DIR\nexcept:\n from utils07.utils import *\n from utils07.type_collector import Context, MapInfo, LabelInfo\n from utils07.assembly import Robot, Map, Action, DIR\n\ndirections = {\n 'N': DIR.N,\n 'S': DIR.S,\n 'E': DIR.E,\n 'W': DIR.W\n}\n\n\nclass CodeGenerator(object):\n '''\n Generador de Codigo\n ''' \n def __init__(self):\n self.errors = []\n self.warnings = []\n \n @visitor.on('node')\n def visit(self, node):\n pass\n \n @visitor.when(ProgramNode)\n def visit(self, node: ProgramNode, context: Context):\n context.code += \\\n'''\nimport numpy as np\nfrom collections import deque\nimport enum\nfrom utils07.assembly import Map, Robot, DIR\n\n\n'''\n self.visit(node.sec_maps, context) \n self.visit(node.sec_inst, context) \n context.code +=\\\n'''\ntry:\n for inst in robot.instructions:\n pass\nexcept Exception as e:\n print('Error en tiempo de Ejecucion en :')\n print(e)\n''' \n return context\n\n #################################################### \n\n @visitor.when(SecMapsNode)\n def visit(self, node: SecMapsNode, context: Context):\n context.code+=\\\n'''\n###### MAPS ######\n''' \n for mapx in node.maps:\n self.visit(mapx, context) \n\n @visitor.when(SecInstructionNode)\n def visit(self, node: SecInstructionNode, context: Context):\n context.code+= \\\n'''\n###### INST #######\n\nrobot = Robot()\n\n\n''' \n context.current_index = 0\n node.instructions.append(NopNode())\n for inst in node.instructions:\n self.visit(inst, context)\n\n ##################### .MAPS ########################\n\n @visitor.when(MapDeclaration)\n def visit(self, node: SecMapsNode, context: Context):\n m = Map((len(node.map), len(node.map[0])))\n\n\n context.code += \\\nf'''\n##### {node.id} ######\n{node.id} = Map(({len(node.map)}, {len(node.map[0])}))\n\n'''\n for i, row in enumerate(node.map):\n for j, col in enumerate(row):\n self.visit(node.map[i][j], context, node.id, i, j)\n context.code+='\\n'\n return m\n\n @visitor.when(SquareNode)\n def visit(self, node: SquareNode, context: Context, map_name: str, i, j):\n if node.lex == 'V':\n pass\n elif node.lex == 'H': \n context.code+= f'{map_name}.addHamper(({i},{j}))\\n'\n else:\n ######TODO#######\n # Incluir Pipes #\n #################\n num = int(node.lex)\n context.code += f'{map_name}[{i},{j}] = {num}\\n'\n \n\n ################################################# \n\n @visitor.when(MovNode)\n def visit(self, node: MovNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.move, [DIR.{node.lex}])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n #---------GOTOs-------------#\n\n @visitor.when(GotoNode)\n def visit(self, node: GotoNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.GoTo, [{context.labels[node.lex].index_instr}])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n 
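# e.g. for a label registered at instruction index 5 (an illustrative value), the emitted line is:\n    #   robot.enqueue_instruction(robot.GoTo, [5])\n\n    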
@visitor.when(GotoIfZeroNode)\n def visit(self, node: GotoIfZeroNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.GoToIfZero, [{context.labels[node.lex].index_instr}])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n\n @visitor.when(GotoIfPositive)\n def visit(self, node: GotoIfPositive, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.GoToIfPositive, [{context.labels[node.lex].index_instr}])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(GotoIfNegative)\n def visit(self, node: GotoIfNegative, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.GoToIfNegative, [{context.labels[node.lex].index_instr}])' \n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n #------------------------#\n\n @visitor.when(LabelNode)\n def visit(self, node: LabelNode, context: Context):\n context.code += f'#### {node.lex} ####\\n' \n\n @visitor.when(OverlapNode)\n def visit(self, node: OverlapNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.overlap, [ {node.lex} ])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(PullNode)\n def visit(self, node: PullNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.pull, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(PushNode)\n def visit(self, node: PushNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.paste_push, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(PopNode)\n def visit(self, node: PopNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.cut, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(CopyNode)\n def visit(self, node: CopyNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.copy, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n \n @visitor.when(PasteNode)\n def visit(self, node: PasteNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.paste, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n #----------Aritmetics------------#\n\n @visitor.when(AddNode)\n def visit(self, node: AddNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.add, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(SubNode)\n def visit(self, node: SubNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.sub, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(DivNode)\n def visit(self, node: DivNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.div, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n 
@visitor.when(MulNode)\n def visit(self, node: MulNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.mul, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n #@visitor.when(ModNode)\n #def visit(self, node: ModNode, context: Context):\n # robot = context.robot\n # robot : Robot\n # robot.enqueue_instruction(robot.)\n\n @visitor.when(DecNode)\n def visit(self, node: DecNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.decrem, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(IncNode)\n def visit(self, node: IncNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.increm, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n #---------------------------------#\n\n @visitor.when(PushMemNode)\n def visit(self, node: PushMemNode, context: Context):\n robot = context.robot\n robot : Robot\n code = 'robot.enqueue_instruction(robot.push_mem, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(PopMemNode)\n def visit(self, node: PopMemNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.pop_mem, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(NopNode)\n def visit(self, node: NopNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.none, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n @visitor.when(PrintNode)\n def visit(self, node: PrintNode, context: Context):\n robot = context.robot\n robot : Robot\n code = f'robot.enqueue_instruction(robot.printHandBox, [])'\n context.code += code + f' # {context.current_index}\\n'\n context.current_index += 1\n\n ","repo_name":"LazaroDGM/compilers","sub_path":"utils07/code_generator.py","file_name":"code_generator.py","file_ext":"py","file_size_in_byte":9776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41207186903","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n\nversion = __import__(\"ckeditor\").__version__\n\nif sys.argv[-1] == \"publish\":\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n sys.exit()\n\nif sys.argv[-1] == \"tag\":\n os.system(f\"git tag -a {version} -m 'version {version}'\")\n os.system(\"git push --tags\")\n sys.exit()\n\nlong_description = \"\\n\".join(\n [\n open(\"README.rst\").read(),\n open(\"AUTHORS.rst\").read(),\n open(\"CHANGELOG.rst\").read(),\n ]\n)\n\n\ndef get_source_files():\n for dirname, _, files in os.walk(\"ckeditor/static/ckeditor/ckeditor/_source\"):\n for filename in files:\n yield os.path.join(\"/\".join(dirname.split(\"/\")[1:]), filename)\n\n\nsetup(\n name=\"django-ckeditor\",\n version=version,\n description=\"Django admin CKEditor integration.\",\n long_description=long_description,\n author=\"Shaun Sephton & Piotr Malinski\",\n author_email=\"riklaunim@gmail.com\",\n url=\"https://github.com/django-ckeditor/django-ckeditor\",\n project_urls={\n \"Documentation\": \"https://django-ckeditor.readthedocs.io/en/latest/\",\n \"Source\": 
\"https://github.com/django-ckeditor/django-ckeditor\",\n    },\n    packages=find_packages(exclude=[\"*.demo\"]),\n    zip_safe=False,\n    install_requires=[\n        \"Django>=3.2\",\n        \"django-js-asset>=2.0\",\n    ],\n    python_requires=\">=3.8\",\n    include_package_data=True,\n    classifiers=[\n        \"Programming Language :: Python\",\n        \"Programming Language :: Python :: 3.8\",\n        \"Programming Language :: Python :: 3.9\",\n        \"Programming Language :: Python :: 3.10\",\n        \"License :: OSI Approved :: BSD License\",\n        \"Operating System :: OS Independent\",\n        \"Framework :: Django\",\n        \"Framework :: Django :: 3.2\",\n        \"Framework :: Django :: 4.0\",\n        \"Intended Audience :: Developers\",\n        \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n    ],\n)\n","repo_name":"django-ckeditor/django-ckeditor","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":2297,"dataset":"github-code","pt":"81"}{"seq_id":"26703667240","text":"# A matrix is a null matrix if all of its elements are zero\nA = [[0, 0, 0],\n     [0, 0, 0],\n     [0, 0, 0]]\n\nprint(\"Null matrix: \")\nerror = False\nfor row in range(0, len(A)):\n    for col in range(0, len(A[row])):\n        print(\"\\t\", A[row][col], end='')\n        if A[row][col] != 0: error = True\n    print()\n\nif error: print(\"=> This isn't a null matrix.\")","repo_name":"naiemofficial/Matrix-Multi-dimensional-Vector-and-Array","sub_path":"Python/Types of Matrix/Null matrix.py","file_name":"Null matrix.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"9697099875","text":"import json\n\n\n# --------------- Helpers that build all of the responses ---------------------\ndef close(session_attributes, fulfillment_state, message):\n    return {\n        'sessionAttributes': session_attributes,\n        'dialogAction': {\n            'type': 'Close',\n            'fulfillmentState': fulfillment_state,\n            'message': {\n                'contentType': 'PlainText',\n                'content': message\n            }\n        }\n    }\n\n\ndef elicit_intent(session_attributes, message):\n    return {\n        'sessionAttributes': session_attributes,\n        'dialogAction': {\n            'type': 'ElicitIntent',\n            'message': {\n                'contentType': 'PlainText',\n                'content': message\n            }\n        }\n    }\n\n\ndef handle_session_end_request():\n    message = \"Hope you found what you were looking for. Hear you later!\"\n    return close({}, 'Fulfilled', message)\n\n\ndef debug(event):\n    response = {\n        'sessionAttributes': {},\n        'dialogAction': {\n            'type': 'ElicitIntent',\n            'message': {\n                'contentType': 'PlainText',\n                'content': json.dumps(event)\n            }\n        }\n    }\n    return response\n\n\ndef parse_prices(prices):\n    \"\"\"\n    Takes a list of strings as a parameter and parses its elements to sound better\n    in Libby's speech. 
For example: 4.50 -> 4 euros 50 cents and 4.00 -> 4 euros\n \"\"\"\n res = []\n for s in prices:\n price = list(s)\n if s[2] == '0':\n s = s[0]\n res.append(s + \" euros\")\n elif is_number(s):\n for i in range(0, len(price)):\n if price[i] == '.':\n price[i] = ' euros '\n res.append(\"\".join(price) + \" cents\")\n else:\n res.append(s)\n return res\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef make_string_list(input_list):\n if len(input_list) > 1:\n ordered_list = sorted(input_list)\n last = ordered_list.pop()\n return \", \".join(ordered_list) + \" and \" + last\n else:\n return input_list[0]\n","repo_name":"NickKuts/Libby","sub_path":"lambda_func/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"14282274732","text":"import pylab\nimport matplotlib.pyplot as plt\nimport numpy as np\n# y = mx + b\n# m is slope, b is y-intercept\n\n####################################################################3\ndef compute_error_for_line_given_points(b, m, cells,X,Y):\n totalError = 0\n \n for i in range(0, len(cells)):\n x = float(X[i])\n y = float(Y[i])\n totalError += (y - (m * x + b)) ** 2\n \n return (totalError / float(len(cells)))\n\ndef step_gradient(b_current, m_current, cells, learningRate,X,Y):\n b_gradient = 0\n m_gradient = 0\n N = float(len(cells))\n for i in range(0, len(cells)):\n x = float(X[i])\n y = float(Y[i])\n b_gradient += -(2/N) * (y - ((m_current * x) + b_current))\n m_gradient += -(2/N) * x * (y - ((m_current * x) + b_current))\n \n new_b = b_current - (learningRate * b_gradient)\n new_m = m_current - (learningRate * m_gradient)\n return [new_b, new_m]\n\ndef gradient_descent_runner(cells, starting_b, starting_m, learning_rate, num_iterations,X,Y):\n b = starting_b\n m = starting_m\n q = []\n \n for i in range(num_iterations):\n b, m = step_gradient(b, m, np.array(cells), learning_rate,X,Y)\n #for j in range(0,50):\n # x = float(X[i])\n # y = float(Y[i])\n # q[i] = (m*x)+b\n \n #print(q)\n #plt.plot(q)\n #plt.show()\n\n \n return [b, m]\n\ndef run():\n ##Solution for Question 3.1\n\n# Code for Part a of Question 3.1\n# Reading the csv file\n f = open( 'auto-mpg.data', 'rU' )\n# Declaring four empty lists\n# X represents mpg (Miles per Galon)\n X = []\n# Y represents displacement (Engine Displacement)\n Y = []\n\n# training set and testing set are currently empty list\n trainingSet = []\n testSet = []\n \n#Reading the input data file\n for line in f:\n\t cells = line.split( \" \" )\n#Storing the values of mpg and displacement in X and Y respectively\t\n\t X.append(cells[ 0 ])\n\t Y.append(cells[ 2 ])\n# Initialising a counter with variable i\n \t\n i=0\n#Storing first 50 values in training Set\n for i in range(0,50):\n\t trainingSet.append([X[i],Y[i]])\n\t i+=1\n#Storing next 50 values in testing set\t\n for i in range(50,100):\n\t testSet.append([X[i],Y[i]])\n\t i+=1\n\t\n ###########################################################################\n# Code for Part b of Question 3.1\t\n#Plotting the training data points\n\n# We can use zip to unpack our data from pairs into lists\n# plt.scatter(*zip(*trainingSet))\n# plt.xlabel('Miles Per Gallon')\n# plt.ylabel('Displacement')\n# plt.show() \n########################################################################### \n #### Command to show the plot for Part b\n #\n initial_b = 0 # initial y-intercept guess\n initial_m = 0 # initial slope 
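The per-point loops in `step_gradient` can be collapsed into NumPy array operations. A sketch of the same update rule, assuming X and Y are sequences of numbers; this is a rewrite for illustration, not the original code:

```python
import numpy as np

def step_gradient_vectorized(b, m, X, Y, learning_rate):
    # Same gradients as the loop version, computed with array operations:
    # dJ/db = -(2/N) * sum(y - (m*x + b)); dJ/dm = -(2/N) * sum(x * (y - (m*x + b)))
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    residuals = Y - (m * X + b)
    n = float(len(X))
    new_b = b - learning_rate * (-(2 / n) * residuals.sum())
    new_m = m - learning_rate * (-(2 / n) * (X * residuals).sum())
    return new_b, new_m
```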
guess\n learning_rate = 0.0001 \n num_iterations = 1000\n x=0\n y=0\n \n print( \"Starting gradient descent at b = {0}, m = {1}, error = {2}\".format(initial_b, initial_m, compute_error_for_line_given_points(initial_b, initial_m, cells,X,Y)))\n [b, m] = gradient_descent_runner(cells, initial_b, initial_m, learning_rate, num_iterations,X,Y)\n print (\"After {0} iterations b = {1}, m = {2}, error = {3}\".format(num_iterations, b, m, compute_error_for_line_given_points(b, m, cells,X,Y)))\n \n # pylab.plot(x,y); \n # pylab.grid(); \n # pylab.show()\n # for i in range(1,1000):\n # print(Z[i]) \n#\t predictedYset.append([1,Z[i]])\n#\t i+=1\n# plt.plot(*zip(*predictedYset))\n \n \n #############\n\n# print (m,b)\n# for i in range(0,50):\n#\t y = m*float(X[i])+b\n#\t i=i+1\n# plt.plot(X,y) \n\n \n \t\nif __name__ == '__main__':\n run()\n\n\n\n","repo_name":"harshitaJhavar/Neural-Networks-and-its-Applications-Deep-Learning-","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6137984","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom decouple import config\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom authentication.models import CustomUser\nfrom plaid import Client\n\nfrom .models import Account, PlaidToken, Transaction\n\nfrom pprint import pprint\n\n# Create your views here.\n\nPLAID_CLIENT_ID = config('PLAID_CLIENT_ID')\nPLAID_SECRET = config('PLAID_SECRET')\nPLAID_ENV = config('PLAID_ENVIRONMENT')\nPLAID_REDIRECT_URI = config('PLAID_REDIRECT_URI')\n\nclient = Client(client_id=PLAID_CLIENT_ID,\n secret=PLAID_SECRET, environment=PLAID_ENV)\n\n\ndef createLinkToken():\n user = None\n\n res = client.LinkToken.create({\n 'user': {\n 'client_user_id': user.id\n },\n 'products': ['auth'],\n 'client_name': 'Ledger',\n 'country_codes': ['US'],\n 'language': 'en',\n })\n\n return JsonResponse(res)\n\n\n@csrf_exempt\ndef registerAccount(request):\n\n userID = int(request.POST.get('userID'))\n publicToken = request.POST.get('publicToken')\n\n # Get User Object\n currUser = CustomUser.objects.get(pk=userID)\n\n # Retrieve access Token\n res = client.Item.public_token.exchange(publicToken)\n accessToken = res['access_token']\n\n allAccounts = client.Accounts.get(accessToken)[\"accounts\"]\n\n for account in allAccounts:\n newAccount = Account(\n accountID=account[\"account_id\"],\n currentBalance=account[\"balances\"][\"current\"],\n name=account[\"name\"],\n type=account[\"type\"],\n user=currUser)\n newAccount.save()\n\n # Check if user already has access token\n\n # Save permanent access token for user\n newPlaidToken = PlaidToken.objects.create(\n accessToken=accessToken,\n user=currUser)\n newPlaidToken.save()\n\n syncTransactions(accessToken)\n\n\ndef syncTransactions(accessToken):\n\n res = client.Transactions.get(\n accessToken, start_date='2021-04-19', end_date='2021-05-18')\n\n transactions = res['transactions']\n\n while len(transactions) < res['total_transactions']:\n res = client.Transactions.get(\n accessToken, start_date='2021-04-19', end_date='2021-05-18', offset=len(transactions))\n transactions.extend(res['transactions'])\n\n for transaction in transactions:\n currAccount = Account.objects.get(accountID=transaction[\"account_id\"])\n\n newTransaction = Transaction.objects.create(\n transactionID=transaction[\"transaction_id\"],\n amount=transaction[\"amount\"],\n date=transaction[\"date\"],\n 
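The `while len(transactions) < res['total_transactions']` loop in `syncTransactions` is a general offset-pagination pattern. A self-contained sketch of that pattern, with a fake page function standing in for the Plaid client (the `fetch_page` signature is a placeholder, not the real Plaid API):

```python
def fetch_all(fetch_page):
    # fetch_page(offset) -> {"items": [...], "total": N} is a placeholder for
    # any offset-paginated endpoint.
    response = fetch_page(offset=0)
    items = list(response["items"])
    while len(items) < response["total"]:
        response = fetch_page(offset=len(items))
        items.extend(response["items"])
    return items

# Usage against a fake in-memory endpoint:
data = list(range(95))
def fake_page(offset, size=25):
    return {"items": data[offset:offset + size], "total": len(data)}
assert fetch_all(fake_page) == data
```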
categoryID=transaction[\"category_id\"],\n            merchantName=transaction[\"name\"],\n            merchantLocation=transaction[\"location\"][\"city\"],\n            account=currAccount\n        )\n        newTransaction.save()\n","repo_name":"Abid-Rais/Ledger","sub_path":"Ledger/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"29766191890","text":"\n# Required module imports\nimport csv\nimport sys\nimport selenium.webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\n# User defined variables for data retrieval\norigin = \"BOM\" \t\t\t\t# Origin airport code\ndestin = \"DEL\" \t\t\t\t# Destination airport code\ntrDate = sys.argv[1]\t\t\t# Date as 1st command line argument.\n\n\"\"\" The following is the Base Url for fetching data from MakeMyTrip Website.\n\tThis URL appears in the search bar after origin, destination and date inputs on the landing page.\n\tThus, this URL can be changed based on User Inputs and required data can be fetched.\n\"\"\"\nbaseDataUrl = \"https://www.makemytrip.com/flight/search?itinerary=\"+ origin +\"-\"+ destin +\"-\"+ trDate +\"&tripType=O&paxType=A-1_C-0_I-0&intl=false&=&cabinClass=E\"\n\ntry:\n\tdriver = selenium.webdriver.Chrome() # Chrome driver is being used.\n\tprint (\"Requesting URL: \" + baseDataUrl)\n\n\tdriver.get(baseDataUrl) \t\t\t # URL requested in browser.\n\tprint (\"Webpage found ...\")\n\n\telement_xpath = '//*[@id=\"left-side--wrapper\"]/div[2]' # First box with relevant flight data.\n\n\t# Wait until the first box with relevant flight data appears on Screen\n\telement = WebDriverWait(driver, 15).until(EC.visibility_of_element_located((By.XPATH, element_xpath)))\n\n\t# Scroll the page till bottom to get full data available in the DOM.\n\tprint (\"Scrolling document up to bottom ...\")\n\tfor j in range(1, 100):\n\t\tdriver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n\t# Find the document body and get its inner HTML for processing in BeautifulSoup parser.\n\tbody = driver.find_element_by_tag_name(\"body\").get_attribute(\"innerHTML\")\n\n\tprint(\"Closing Chrome ...\") # No more usage needed.\n\tdriver.quit() \t\t\t\t# Browser Closed.\n\n\tprint(\"Getting data from DOM ...\")\n\tsoupBody = BeautifulSoup(body) # Parse the inner HTML using BeautifulSoup\n\n\t# Extract the required tags \n\tspanFlightName = soupBody.findAll(\"span\", {\"class\": \"airways-name \"}) \t\t\t# Tags with Flight Name\n\tpFlightCode = soupBody.findAll(\"p\", {\"class\": \"fli-code\"})\t\t\t\t# Tags with Flight Code\n\tdivDeptTime = soupBody.findAll(\"div\", {\"class\": \"dept-time\"})\t\t\t\t# Tags with Departure Time\n\tpDeptCity = soupBody.findAll(\"p\", {\"class\": \"dept-city\"})\t\t\t\t# Tags with Departure City\n\tpFlightDuration = soupBody.findAll(\"p\", {\"class\": \"fli-duration\"})\t\t\t# Tags with Flight Duration\n\tpArrivalTime = soupBody.findAll(\"p\", {\"class\": \"reaching-time append_bottom3\"}) \t# Tags with Arrival Time\n\tpArrivalCity = soupBody.findAll(\"p\", {\"class\": \"arrival-city\"})\t\t\t\t# Tags with Arrival City\n\tspanFlightCost = soupBody.findAll(\"span\", {\"class\": \"actual-price\"})\t\t\t# Tags with Flight Cost\n\n\t\n\t# Data Headers\n\tflightsData = [[\"flight_name\", \"flight_code\", \"departure_time\", \"departure_city\", \"flight_duration\", \"arrival_time\", 
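The scraper gathers eight parallel tag lists and later indexes them by position; `zip()` expresses the same row assembly more compactly and stops at the shortest list if the page rendered only partially. A sketch assuming the tag lists and the `flightsData` header list from the scraper are in scope:

```python
# Row assembly with zip(), assuming the eight tag lists extracted above;
# zip stops at the shortest list, so a partially rendered page cannot raise
# an IndexError the way positional indexing can.
columns = [spanFlightName, pFlightCode, divDeptTime, pDeptCity,
           pFlightDuration, pArrivalTime, pArrivalCity, spanFlightCost]
for row in zip(*columns):
    flightsData.append([tag.text for tag in row])
```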
\"arrival_city\", \"flight_cost\"]]\n\n\t# Extracting data from tags and appending to main database flightsData\n\tfor j in range(0, len(spanFlightName)):\n\t\tflightsData.append([spanFlightName[j].text, pFlightCode[j].text, divDeptTime[j].text, pDeptCity[j].text, pFlightDuration[j].text, pArrivalTime[j].text, pArrivalCity[j].text, spanFlightCost[j].text])\n\n\t# Output File for FlightsData. This file will have the data in comma separated form.\n\toutputFile = \"FlightsData_\" + origin +\"-\"+ destin +\"-\"+ trDate.split(\"/\")[0] + \"-\" + trDate.split(\"/\")[1] + \"-\" + trDate.split(\"/\")[2] + \".csv\"\n\t\n\t# Publishing Data to File\n\tprint(\"Writing flight data to file: \"+ outputFile + \" ...\")\n\twith open(outputFile, 'w', newline='') as spfile:\n\t csv_writer = csv.writer(spfile)\n\t csv_writer.writerows(flightsData)\n\t print (\"Data Extracted and Saved to File. \")\n\nexcept Exception as e:\n\tprint (str(e))\n\n\n# EOF\n# ----------------------------------------------------------------------------------------------------------\n#print(\"Records\\nFlight Name: \"+ str(len(spanFlightName)) + \"\\nFlightCode: \"+ str(len(pFlightCode)) + \"\\nDept Time: \"+ str(len(divDeptTime)) + \"\\nDept City: \"+ str(len(pDeptCity)) + \"\\nFlight Duration: \"+ str(len(pFlightDuration)) + \"\\nArrival Time: \"+ str(len(pArrivalTime)) + \"\\nArrival City: \"+ str(len(pArrivalCity)) + \"\\nFlight Cost: \"+ str(len(spanFlightCost)))\n#print(flightsData)\n#print(outputFile)\n\n","repo_name":"ldtalent/anupt-flightscraper","sub_path":"MMT_Scraper/dataFetcher.py","file_name":"dataFetcher.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17563525969","text":"# 66. 
Plus One\n# Easy\n# Given a non-empty array of decimal digits representing a non-negative integer, increment one to the integer.\n#\n# The digits are stored such that the most significant digit is at the head of the list, and each element in the array contains a single digit.\n#\n# You may assume the integer does not contain any leading zero, except the number 0 itself.\n\nclass Solution:\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n #数组转换成数字\n intNum=0\n for i in range(len(digits)):\n intNum=intNum*10+digits[i]\n intNum+=1\n #数字转换成字符\n strNum=str(intNum)\n #字符转换成数组\n res=[]\n for i in range(len(strNum)):\n res.append(int(strNum[i]))\n return res","repo_name":"pangpangcat/leetcodepractice","sub_path":"DailyChallenge/66.py","file_name":"66.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12526527780","text":"import numpy as np\nimport socketio\nimport eventlet\nimport base64\nimport cv2\nimport inputs\nimport multiprocessing\nfrom gamepad import GamepadHandler\nfrom gamepad import GamepadReader\n\nsio = socketio.Server(cors_allowed_origins='*')\napp = socketio.WSGIApp(sio)\nconnected_clients = ''\n\n\n@sio.event\ndef on_connect(sid, environ):\n print(sio.emit('receiveGamepad', \"HALLO\",room=sid))\n print(f\"Client {sid} connected.\")\n sio.emit('receive', 'JALO', room=connected_clients)\n connected_clients = sid\n\n@sio.on('stream_video')\ndef stream_video(sid, data):\n # Decode base64-encoded video frame\n decoded_frame = base64.b64decode(data)\n # Convert the video frame to numpy array\n nparr = np.frombuffer(decoded_frame, np.uint8)\n frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n frame = cv2.resize(frame, (640, 480))\n # Show the video frame (viewer)\n cv2.imshow('Received Video', frame)\n cv2.waitKey(1)\n\n\n@sio.on('disconnect')\ndef on_disconnect(sid):\n print(f\"Client {sid} disconnected.\")\n # Menghapus session ID dari dictionary saat client terputus\n if sid in connected_clients:\n del connected_clients[sid]\n\n\ndef running():\n global dataGamepad\n GamepadReader()\n gamepad_handler = GamepadHandler()\n while True:\n events = inputs.get_gamepad()\n for event in events:\n data = gamepad_handler.handle_event(event)\n if data == 'A':\n dataGamepad = 'B'\n print(dataGamepad)\n sio.emit('receive', data, room=connected_clients)\n\n \nif __name__ == '__main__':\n process_gamepad = multiprocessing.Process(target=running)\n process_gamepad.start()\n eventlet.wsgi.server(eventlet.listen(('192.168.1.102', 5001)), app)\n\n\n","repo_name":"SyamsiarKautsar/myraspy","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26107947816","text":"from flask import Flask\nfrom flask_restful import Resource, Api\nimport json\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__)\nCORS(app)\n\napi = Api(app)\n\n\nclass GetDataFromDB(Resource):\n def get(self):\n data = [{\n \"timestamp\": 1590434411,\n \"dht_humidity\": 25.7,\n \"dht_temperature\": 35,\n \"idu_sound\": 'LOW',\n \"gas_lpg\": 2.123,\n \"gas_co\": 5.239,\n \"gas_smoke\": 18.125,\n },\n {\n \"timestamp\": 1590434412,\n \"dht_humidity\": 25.7,\n \"dht_temperature\": 35.1,\n \"idu_sound\": 'HIGH',\n \"gas_lpg\": 2.123,\n \"gas_co\": 5.239,\n \"gas_smoke\": 18.125,\n },\n {\n \"timestamp\": 1590434413,\n \"dht_humidity\": 25.7,\n 
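The Plus One solution above round-trips through `int()` and `str()`; the increment can also be done in place with a right-to-left carry, avoiding the conversions entirely. An alternative sketch, not the author's code:

```python
def plus_one_carry(digits):
    # Walk from the least significant digit, propagating the carry in place.
    for i in range(len(digits) - 1, -1, -1):
        if digits[i] < 9:
            digits[i] += 1
            return digits
        digits[i] = 0  # a 9 rolls over and the carry moves left
    return [1] + digits  # every digit was 9, e.g. [9, 9] -> [1, 0, 0]

assert plus_one_carry([1, 2, 9]) == [1, 3, 0]
```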
\"dht_temperature\": 35.1,\n \"idu_sound\": 'HIGH',\n \"gas_lpg\": 2.123,\n \"gas_co\": 5.239,\n \"gas_smoke\": 18.125,\n },\n {\n \"timestamp\": 1590434414,\n \"dht_humidity\": 25.7,\n \"dht_temperature\": 35.1,\n \"idu_sound\": 'HIGH',\n \"gas_lpg\": 2.123,\n \"gas_co\": 5.239,\n \"gas_smoke\": 18.125,\n },\n ]\n\n return data\n\n\napi.add_resource(GetDataFromDB, '/getdata')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"marc101101/operation_feuertaufe","sub_path":"raspi/server_fake.py","file_name":"server_fake.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19586852600","text":"# coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport json\nimport shlex\nimport subprocess\nfrom os.path import basename, dirname\n\nfrom clint.textui import puts\nfrom commcare_cloud.cli_utils import ask, print_command\nfrom commcare_cloud.colors import color_notice, color_error\nfrom commcare_cloud.commands import shared_args\nfrom commcare_cloud.commands.ansible.helpers import (\n AnsibleContext,\n DEPRECATED_ANSIBLE_ARGS,\n get_common_ssh_args,\n get_user_arg, run_action_with_check_mode)\nfrom commcare_cloud.commands.command_base import CommandBase, Argument\nfrom commcare_cloud.parse_help import ANSIBLE_HELP_OPTIONS_PREFIX, add_to_help_text, filtered_help_message\n\nNON_POSITIONAL_ARGUMENTS = (\n Argument('-b', '--become', action='store_true', help=(\n \"run operations with become (implies vault password prompting if necessary)\"\n ), include_in_docs=False),\n Argument('--become-user', help=(\n \"run operations as this user (default=root)\"\n ), include_in_docs=False),\n shared_args.SKIP_CHECK_ARG,\n shared_args.FACTORY_AUTH_ARG,\n shared_args.QUIET_ARG,\n shared_args.STDOUT_CALLBACK_ARG,\n)\n\n\nclass RunAnsibleModule(CommandBase):\n command = 'run-module'\n help = \"\"\"\n Run an arbitrary Ansible module.\n\n Example:\n\n To print out the `inventory_hostname` ansible variable for each machine, run\n ```\n commcare-cloud run-module all debug \"msg={{ inventory_hostname }}\"\n ```\n \"\"\"\n arguments = (\n shared_args.INVENTORY_GROUP_ARG,\n Argument('module', help=\"\"\"\n The name of the ansible module to run. 
Complete list of built-in modules can be found at\n [Module Index](http://docs.ansible.com/ansible/latest/modules/modules_by_category.html).\n \"\"\"),\n Argument('module_args', help=\"\"\"\n Args for the module, formatted as a single string.\n (Tip: put quotes around it, as it will likely contain spaces.)\n Both `arg1=value1 arg2=value2` syntax\n and `{\"arg1\": \"value1\", \"arg2\": \"value2\"}` syntax are accepted.\n \"\"\"),\n ) + NON_POSITIONAL_ARGUMENTS\n\n def modify_parser(self):\n add_to_help_text(self.parser, \"\\n{}\\n{}\".format(\n \"The ansible options below are available as well:\",\n filtered_help_message(\n \"ansible -h\",\n below_line=ANSIBLE_HELP_OPTIONS_PREFIX,\n above_line='Some modules do not make sense in Ad-Hoc (include, meta, etc)',\n exclude_args=DEPRECATED_ANSIBLE_ARGS + [\n '--help',\n '--become',\n '--become-user',\n '-i',\n '-m',\n '-a',\n '--ask-vault-pass',\n '--vault-password-file',\n '--check',\n '--diff'\n ],\n )\n ))\n\n def run(self, args, unknown_args):\n ansible_context = AnsibleContext(args)\n environment = ansible_context.environment\n\n def _run_ansible(args, *unknown_args):\n return run_ansible_module(\n ansible_context,\n args.inventory_group, args.module, args.module_args,\n become=args.become, become_user=args.become_user,\n use_factory_auth=args.use_factory_auth, extra_args=unknown_args\n )\n\n def run_check():\n with environment.secrets_backend.suppress_datadog_event():\n return _run_ansible(args, '--check', *unknown_args)\n\n def run_apply():\n return _run_ansible(args, *unknown_args)\n\n return run_action_with_check_mode(run_check, run_apply, args.skip_check, args.quiet)\n\n\ndef run_ansible_module(ansible_context, inventory_group, module, module_args,\n become=True, become_user=None, use_factory_auth=False, quiet=False,\n extra_args=(), run_command=subprocess.call):\n extra_args = tuple(extra_args)\n if run_command is ansible_json or not quiet:\n extra_args = (\"--diff\",) + extra_args\n else:\n extra_args = (\"--one-line\",) + extra_args\n\n environment = ansible_context.environment\n cmd_parts = (\n 'ansible', inventory_group,\n '-m', module,\n '-i', environment.paths.inventory_source,\n '-a', module_args,\n )\n\n public_vars = environment.public_vars\n cmd_parts += get_user_arg(public_vars, extra_args, use_factory_auth=use_factory_auth)\n become = become or bool(become_user)\n become_user = become_user\n\n if become:\n cmd_parts += ('--become',)\n if become_user:\n cmd_parts += ('--become-user', become_user)\n cmd_parts += (\n '-e', '@{}'.format(environment.paths.public_yml),\n '-e', '@{}'.format(environment.paths.generated_yml),\n )\n cmd_parts += environment.secrets_backend.get_extra_ansible_args()\n\n env_vars = ansible_context.build_env(need_secrets=become)\n if run_command is ansible_json:\n env_vars[\"ANSIBLE_VERBOSITY\"] = \"0\"\n env_vars[\"ANSIBLE_LOAD_CALLBACK_PLUGINS\"] = \"1\"\n env_vars[\"ANSIBLE_STDOUT_CALLBACK\"] = \"json\"\n\n cmd_parts_with_common_ssh_args = get_common_ssh_args(environment, use_factory_auth=use_factory_auth)\n cmd_parts += cmd_parts_with_common_ssh_args + extra_args\n cmd = ' '.join(shlex.quote(arg) for arg in cmd_parts)\n if not quiet:\n print_command(cmd)\n with environment.generated_yml():\n return run_command(cmd_parts, env=env_vars)\n\n\ndef ansible_json(*args, **kw):\n \"\"\"JSON command runner for run_ansible_module\n\n Usage: run_ansible_module(..., run_command=ansible_json)\n\n Returns a dict: {: , ...}\n \"\"\"\n try:\n output = subprocess.check_output(*args, **kw)\n except 
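`run_ansible_module` keeps the command as a tuple and only joins it with `shlex.quote` for display, so the actual subprocess call never goes through a shell. A standalone sketch of that log-safely, execute-directly pattern:

```python
import shlex
import subprocess

def run_logged(cmd_parts, env=None):
    # Quote only the logged string; execute the list form so no shell is
    # involved and arguments cannot be re-split or injected.
    print("Running:", " ".join(shlex.quote(arg) for arg in cmd_parts))
    return subprocess.call(list(cmd_parts), env=env)

run_logged(("echo", "hello world; rm -rf /tmp/x"))  # logged quoted, run safely
```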
subprocess.CalledProcessError as err:\n output = err.output\n try:\n return json.loads(output)[\"plays\"][-1][\"tasks\"][-1][\"hosts\"]\n except (KeyError, IndexError, ValueError, TypeError):\n raise BadAnsibleResult(output)\n\n\nclass BadAnsibleResult(Exception):\n pass\n\n\nclass RunShellCommand(CommandBase):\n command = 'run-shell-command'\n help = \"\"\"\n Run an arbitrary command via the Ansible shell module.\n\n Example:\n\n ```\n commcare-cloud run-shell-command all 'df -h | grep /opt/data'\n ```\n\n to get disk usage stats for `/opt/data` on every machine.\n \"\"\"\n\n run_setup_on_control_by_default = False\n\n arguments = (\n shared_args.INVENTORY_GROUP_ARG,\n Argument('shell_command', help=\"\"\"\n Command to run remotely.\n (Tip: put quotes around it, as it will likely contain spaces.)\n Cannot being with `sudo`; to do that use the ansible `--become` option.\n \"\"\"),\n Argument('--silence-warnings', action='store_true',\n help=\"Silence shell warnings (such as to use another module instead).\"),\n ) + NON_POSITIONAL_ARGUMENTS\n\n def modify_parser(self):\n RunAnsibleModule(self.parser).modify_parser()\n\n def run(self, args, unknown_args):\n if args.shell_command.strip().startswith('sudo '):\n puts(color_notice(\n \"To run as another user use `--become` (for root) or `--become-user `.\\n\"\n \"Using 'sudo' directly in the command is non-standard practice.\"))\n if not ask(\"Do you know what you're doing and want to run this anyway?\", quiet=args.quiet):\n return 0 # exit code\n\n args.module = 'shell'\n if args.silence_warnings:\n args.module_args = 'warn=false ' + args.shell_command\n else:\n args.module_args = args.shell_command\n args.skip_check = True\n args.quiet = True\n del args.shell_command\n return RunAnsibleModule(self.parser).run(args, unknown_args)\n\n\nclass SendDatadogEvent(CommandBase):\n command = 'send-datadog-event'\n help = \"Track an infrastructure maintainance event in Datadog\"\n\n arguments = (\n Argument('event_title', help=\"\"\"\n Title of the datadog event.\n \"\"\"),\n Argument('event_text', help=\"\"\"\n Text content of the datadog event.\n \"\"\"),\n Argument('--tags', nargs=\"*\", help=\"\"\"\n Additional tags e.g. 
host:web2\n \"\"\"),\n Argument('--alert_type', choices=[\"error\", \"warning\", \"info\", \"success\"], default=\"info\", help=\"\"\"\n Alert type.\n \"\"\"),\n )\n\n def run(self, args, unknown_args):\n args.module = 'datadog_event'\n ansible_context = AnsibleContext(args)\n environment = ansible_context.environment\n datadog_api_key = environment.get_secret('DATADOG_API_KEY')\n datadog_app_key = environment.get_secret('DATADOG_APP_KEY')\n tags = args.tags or []\n tags.append(\"environment:{}\".format(args.env_name))\n args.module_args = \"api_key={api_key} app_key={app_key} \" \\\n \"tags='{tags}' text='{text}' title='{title}' aggregation_key={agg}\".format(\n api_key=datadog_api_key,\n app_key=datadog_app_key,\n tags=\",\".join(tags),\n text=args.event_text,\n title=args.event_title,\n agg='commcare-cloud'\n )\n return run_ansible_module(\n ansible_context,\n '127.0.0.1', args.module, args.module_args,\n become=False, quiet=True\n )\n\n\nclass Ping(CommandBase):\n command = 'ping'\n help = 'Ping specified or all machines to see if they have been provisioned yet.'\n\n arguments = (\n shared_args.INVENTORY_GROUP_ARG,\n ) + NON_POSITIONAL_ARGUMENTS\n\n def run(self, args, unknown_args):\n args.shell_command = 'echo \"$(hostname) - $(uptime)\"'\n args.silence_warnings = False\n return RunShellCommand(self.parser).run(args, unknown_args)\n\n\nclass KillStaleCeleryWorkers(CommandBase):\n command = 'kill-stale-celery-workers'\n help = 'Kill celery workers that failed to properly go into warm shutdown.'\n run_setup_on_control_by_default = False\n\n def run(self, args, unknown_args):\n ansible_context = AnsibleContext(args)\n group_vars = ansible_context.environment.paths.group_vars_all_yml\n return run_ansible_module(\n ansible_context,\n 'django_manage[0]',\n 'shell',\n (\n 'cd {{ code_home }}; '\n '{{ virtualenv_home }}/bin/python manage.py kill_stale_celery_workers'\n ),\n become=True,\n become_user='cchq',\n extra_args=['-e', f'@{group_vars}'] + unknown_args,\n )\n\n\nclass ListReleases(CommandBase):\n command = 'list-releases'\n help = 'List names that can be passed to `deploy --resume=RELEASE_NAME`'\n\n arguments = (\n Argument('--limit', default='webworkers[0]', help=\"\"\"\n Run command on limited host(s). 
Default: webworkers[0]\n \"\"\"),\n )\n\n def run(self, args, unknown_args):\n context = AnsibleContext(args)\n group_vars = context.environment.paths.group_vars_all_yml\n results = run_ansible_module(\n context,\n args.limit,\n 'shell',\n (\n 'readlink {{ www_home }}/current;'\n 'echo ---;'\n 'ls {{ www_home }}/releases/*/.build-complete;'\n 'echo ---;'\n 'ls {{ www_home }}/releases;'\n ),\n become=True,\n become_user='cchq',\n quiet=True,\n extra_args=['-e', f'@{group_vars}'] + unknown_args,\n run_command=ansible_json,\n )\n for host, result in results.items():\n output = result.get(\"stdout\", \"---\\n---\\n\")\n current, complete, releases = output.split(\"---\\n\", 2)\n current = basename(current.rstrip('\\n'))\n complete = [basename(dirname(v)) for v in complete.split('\\n') if v]\n releases = [r for r in releases.split('\\n') if r]\n if not releases:\n print(color_error(f\"{host} - no releases found\"))\n else:\n print(host)\n for release in sorted(releases):\n print(\" \", release, end=\"\")\n if release == current:\n print(\" (current)\", end=\"\")\n if release not in complete:\n print(\" (incomplete)\", end=\"\")\n print()\n return 0\n","repo_name":"dimagi/commcare-cloud","sub_path":"src/commcare_cloud/commands/ansible/run_module.py","file_name":"run_module.py","file_ext":"py","file_size_in_byte":12535,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"81"} +{"seq_id":"11141196616","text":"\"\"\"define a function which can print a dictionary wher the keys are numbers between\n1 and 3 (both included) and the values are square of Keys\n\nhints:----\nuse dict[keys]=value pattern to put entry into a dictionry,\nuse ** operator to get power of numbers.\"\"\"\n\n# def printdict():\n# d=dict()\n# d[1]=1\n# d[2]=2**2\n# d[3]=3**2\n# print(d)\n# printdict()\ndef getdict():\n name=dict()\n name[2]=2**10\n name[3]=3%6\n name[56]=56-54\n print(name)\ngetdict() \n ","repo_name":"Narendra-1997/Python-Basic-Programs","sub_path":"programms_100/thirty_two_dict.py","file_name":"thirty_two_dict.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44496413907","text":"\"\"\"Tests for explainaboard.serialization.types.\"\"\"\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport unittest\n\nfrom explainaboard.serialization.types import SerializableDataclass\n\n\n@dataclasses.dataclass\nclass MyData(SerializableDataclass):\n \"\"\"SerializableDataclass for this test.\"\"\"\n\n foo: int\n bar: str\n\n\nclass WithoutDecorator(SerializableDataclass):\n \"\"\"SerializableDataclass without decorator.\"\"\"\n\n pass\n\n\nclass SerializableDataclassTest(unittest.TestCase):\n def test_serialize(self) -> None:\n self.assertEqual(MyData(111, \"222\").serialize(), {\"foo\": 111, \"bar\": \"222\"})\n\n def test_serialize_without_decorator(self) -> None:\n with self.assertRaisesRegex(TypeError, r\"is not a dataclass\"):\n WithoutDecorator().serialize()\n\n def test_deserialize(self) -> None:\n self.assertEqual(\n MyData.deserialize({\"foo\": 333, \"bar\": \"444\"}), MyData(333, \"444\")\n )\n\n def test_deserialize_without_decorator(self) -> None:\n with self.assertRaisesRegex(TypeError, r\"is not a dataclass\"):\n WithoutDecorator.deserialize({})\n\n def test_deserialize_excessive(self) -> None:\n # Unrecognized members are ignored.\n self.assertEqual(\n MyData.deserialize({\"foo\": 555, \"bar\": \"666\", \"baz\": 777}),\n MyData(555, \"666\"),\n )\n\n def 
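The `getdict`/`printdict` exercise above fills the mapping key by key; a dict comprehension builds the same keys-to-squares dictionary in one expression:

```python
# One-expression version of the "keys 1..3 mapped to their squares" exercise.
squares = {n: n ** 2 for n in range(1, 4)}
print(squares)  # {1: 1, 2: 4, 3: 9}
```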
test_deserialize_deficient(self) -> None:\n with self.assertRaisesRegex(TypeError, r\"positional argument: 'bar'\"):\n MyData.deserialize({\"foo\": 888})\n","repo_name":"neulab/ExplainaBoard","sub_path":"explainaboard/serialization/types_test.py","file_name":"types_test.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":356,"dataset":"github-code","pt":"81"} +{"seq_id":"28125321474","text":"import argparse\nimport os.path\nimport re\nimport sys\nimport traceback\n\nimport maat.common as common\nimport maat.io as io\nimport maat.make\nimport maat.rule\nfrom maat.builtin import *\nfrom maat import *\n\n\nrule_re = re.compile(\"^([ \\t]*)(.*):(.*)$\")\nindent_re = re.compile(\"^([ \\t]*).*$\")\n\n# Command variables\nSCRIPTS = {}\nmake_name = \"make.maat\"\nmonitor = io.Monitor()\nDB = maat.rule.DataBase()\nfirst_goal = None\nscripts = {}\n\ndef maat_path(path):\n\treturn '\"%s\"' % path\n\ndef maat_paths(paths):\n\treturn \" \".join([maat_path(p) for p in paths])\n\n\n# variable replacement\nVARS_MAP = {\n\t'@': '\" + maat_path(maat_rule.targets[0]) + \"',\n\t'<': '\" + maat_path(maat_rule.sources[0]) + \"',\n\t'^': '\" + maat_paths(maat_rule.sources) + \"'\n}\n\nVAR_RE = re.compile(r\"\\$([^(])|\\$\\(([^)]+)\\)\")\n\ndef expand(text):\n\tres = \"\"\n\tmat = VAR_RE.search(text)\n\twhile mat:\n\t\tres = res + text[:mat.start()]\n\t\tid = mat.group(1)\n\t\tif id == None:\n\t\t\tid = mat.group(2)\n\t\ttry:\n\t\t\tres = res + VARS_MAP[id]\n\t\texcept KeyError:\n\t\t\tres = res + '\" + str(%s) + \"' % id\n\t\ttext = text[mat.end():]\n\t\tmat = VAR_RE.search(text)\n\treturn res + text\n\n\n# API variables\nTOPDIR = common.topdir\n\n# parsing the script\nclass Script:\n\t\"\"\"Class in charge of parsing and running the script in order to\n\tbuild the rule database.\"\"\"\n\n\tdef __init__(self, path, env):\n\t\tself.path = path\n\t\tself.env = dict(env)\n\t\tself.linefix = []\n\t\tSCRIPTS[path] = self\n\n\tdef make_rule(self, targets, sources, fun, file, line, cnum):\n\t\tglobal first_goal\n\t\trule = maat.rule.FunRule(targets, sources, fun)\n\t\trule.file = file\n\t\trule.line = line\n\t\tDB.add(rule)\n\t\tif first_goal == None:\n\t\t\tfirst_goal = targets[0]\n\t\tself.linefix.append(cnum)\n\n\tdef fix_line(self, line):\n\t\tprint(line, self.linefix)\n\t\tfor l in self.linefix:\n\t\t\tif line <= l:\n\t\t\t\tbreak\n\t\t\tline -= 1\n\t\treturn line\n\n\tdef eval(self, mon):\n\t\t\"\"\"Process the script to build the database. 
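A minimal sketch of a serialize/deserialize mixin consistent with the behaviour these tests exercise (dict round-trip, unknown keys ignored, missing required fields raising TypeError); the real ExplainaBoard implementation may differ in detail:

```python
import dataclasses

class SerializableSketch:
    def serialize(self):
        if not dataclasses.is_dataclass(self):
            raise TypeError(f"{type(self).__name__} is not a dataclass")
        return dataclasses.asdict(self)

    @classmethod
    def deserialize(cls, data):
        if not dataclasses.is_dataclass(cls):
            raise TypeError(f"{cls.__name__} is not a dataclass")
        known = {f.name for f in dataclasses.fields(cls)}
        # Unknown keys are dropped; a missing required field raises TypeError,
        # matching the behaviour the tests above check for.
        return cls(**{k: v for k, v in data.items() if k in known})
```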
In case of error,\n\t\traise MaatError.\"\"\"\n\n\t\t# prepare state machine\n\t\tnum = 0\n\t\tNORMAL = 0\n\t\tINRULE = 1\n\t\tmode = NORMAL\n\t\tindent = None\n\t\ttargets = None\n\t\tsources = None\n\t\trnum = 0\n\t\tsource = \"\"\n\n\t\t# generate rule build line\n\t\tdef make(f):\n\t\t\tsource = \"\"\n\t\t\tif num - rnum <= 1:\n\t\t\t\tsource = source + indent + \"\\tpass\\n\"\n\t\t\treturn source + \"self.make_rule([%s], [%s], %s, \\\"%s\\\", %s, %s)\\n\" %(\n\t\t\t\t\", \".join(['\"%s\"' % t for t in targets]),\n\t\t\t\t\", \".join(['\"%s\"' % s for s in sources]),\n\t\t\t\tf, self.path, rnum + 1, num\n\t\t\t)\n\n\t\t# process the lines\n\t\tfor l in open(path):\n\t\t\tnum = num + 1\n\t\t\tif mode == NORMAL:\n\t\t\t\tm = rule_re.match(l)\n\t\t\t\tif m == None:\n\t\t\t\t\tsource = source + l\n\t\t\t\telse:\n\t\t\t\t\tmode = INRULE\n\t\t\t\t\trnum = num\n\t\t\t\t\tindent = m.group(1)\n\t\t\t\t\ttargets = m.group(2).split()\n\t\t\t\t\tsources = m.group(3).split()\n\t\t\t\t\tsource = source + indent + \"def f(maat_rule):\\n\"\n\t\t\telse:\n\t\t\t\tm = indent_re.match(l)\n\t\t\t\tif len(m.group(1)) <= len(indent):\n\t\t\t\t\tmode = NORMAL\n\t\t\t\t\tsource += make(\"f\")\n\t\t\t\telse:\n\t\t\t\t\tl = expand(l)\n\t\t\t\tsource = source + l\n\n\t\t# final rule make if any\n\t\tif mode == INRULE:\n\t\t\tsource += make(\"f\")\n\n\t\t# process the new sources\n\t\t#print(\"DEBUG:\", source)\n\t\tcode = compile(source, self.path, \"exec\")\n\t\texec(code, globals(), locals())\n\n\n# parse arguments\nparser = argparse.ArgumentParser(\n\tprog = \"maat\",\n\tdescription = \"project maker\"\n)\nparser.add_argument('goals', nargs='*',\n\thelp=\"Goals to make.\")\nparser.add_argument('--print-data-base', '-p', action=\"store_true\",\n\thelp=\"Print the rule database.\")\nargs = parser.parse_args()\npath = make_name\n\n\n# parse the script\nif not os.access(path, os.R_OK):\n\tmonitor.print_fatal(\"cannot access %s\" % path)\nmain_script = Script(path, locals())\nmain_script.eval(monitor)\n\n\n# print the data base\nif args.print_data_base:\n\tfor rule in DB.rules:\n\t\tprint(rule)\n\n# build the goals\nelse:\n\tgoals = args.goals\n\tif goals == []:\n\t\tgoals = [first_goal]\n\ttry:\n\t\tmaat.make.SeqMaker(DB).make(goals, monitor)\n\texcept Exception as e:\n\t\terror_re = re.compile(r'^\\s*File \"([^\"]*)\", line ([0-9]+), in')\n\t\tfor f in traceback.format_tb(sys.exc_info()[2]):\n\t\t\tm = error_re.match(f)\n\t\t\ttry:\n\t\t\t\tline = SCRIPTS[m.group(1)].fix_line(int(m.group(2)))\n\t\t\t\tprint(\"%s%d%s\" % (f[:m.start(2)], line, f[m.end(2):]))\n\t\t\texcept KeyError:\n\t\t\t\tprint(f)\n\t\tprint(\"%s: %s\" % (e.__class__.__name__, e))\n","repo_name":"hcasse/maat2","sub_path":"maat.py","file_name":"maat.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33361250369","text":"import os\nimport sys\nimport copy\nfrom pprint import pprint\nfrom aocd import get_data\nimport time\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# --- Day 2: Rock Paper Scissors --- #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Part 1 #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# The Elves begin to set up camp on the beach. 
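The `expand()` helper earlier in this script rewrites `$x` and `$(name)` references into string concatenations for generated code. When the variable values are already known, the same grammar can be substituted directly with `re.sub` and a callback; a sketch assuming a plain dict of variables rather than the generated-code approach:

```python
import re

VAR_RE = re.compile(r"\$([^(])|\$\(([^)]+)\)")

def expand_now(text, variables):
    def repl(match):
        name = match.group(1) or match.group(2)  # $x or $(name)
        return str(variables[name])
    return VAR_RE.sub(repl, text)

print(expand_now("gcc -o $(target) $^", {"target": "app", "^": "a.c b.c"}))
# -> gcc -o app a.c b.c
```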
To decide whose tent gets to be closest to the snack storage, a #\n# giant Rock Paper Scissors tournament is already in progress. #\n# #\n# Rock Paper Scissors is a game between two players. Each game contains many rounds; in each round, the players #\n# each simultaneously choose one of Rock, Paper, or Scissors using a hand shape. Then, a winner for that round #\n# is selected: Rock defeats Scissors, Scissors defeats Paper, and Paper defeats Rock. If both players choose #\n# the same shape, the round instead ends in a draw. #\n# #\n# Appreciative of your help yesterday, one Elf gives you an encrypted strategy guide (your puzzle input) that #\n# they say will be sure to help you win. \"The first column is what your opponent is going to play: A for Rock, #\n# B for Paper, and C for Scissors. The second column--\" Suddenly, the Elf is called away to help with someone's #\n# tent. #\n# #\n# The second column, you reason, must be what you should play in response: X for Rock, Y for Paper, and Z for #\n# Scissors. Winning every time would be suspicious, so the responses must have been carefully chosen. #\n# #\n# The winner of the whole tournament is the player with the highest score. Your total score is the sum of your #\n# scores for each round. The score for a single round is the score for the shape you selected (1 for Rock, 2 #\n# for Paper, and 3 for Scissors) plus the score for the outcome of the round (0 if you lost, 3 if the round was #\n# a draw, and 6 if you won). #\n# #\n# Since you can't be sure if the Elf is trying to help you or trick you, you should calculate the score you #\n# would get if you were to follow the strategy guide. #\n# #\n# For example, suppose you were given the following strategy guide: #\n# #\n# A Y #\n# B X #\n# C Z #\n# This strategy guide predicts and recommends the following: #\n# #\n# In the first round, your opponent will choose Rock (A), and you should choose Paper (Y). This ends in a win #\n# for you with a score of 8 (2 because you chose Paper + 6 because you won). #\n# In the second round, your opponent will choose Paper (B), and you should choose Rock (X). This ends in a loss #\n# for you with a score of 1 (1 + 0). #\n# The third round is a draw with both players choosing Scissors, giving you a score of 3 + 3 = 6. #\n# In this example, if you were to follow the strategy guide, you would get a total score of 15 (8 + 1 + 6). #\n# #\n# What would your total score be if everything goes exactly according to your strategy guide? #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Part 2 #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# The Elf finishes helping with the tent and sneaks back over to you. \"Anyway, the second column says how the #\n# round needs to end: X means you need to lose, Y means you need to end the round in a draw, and Z means you #\n# need to win. Good luck!\" #\n# # \n# The total score is still calculated in the same way, but now you need to figure out what shape to choose so #\n# the round ends as indicated. The example above now goes like this: #\n# #\n# In the first round, your opponent will choose Rock (A), and you need the round to end in a draw (Y), so you #\n# also choose Rock. This gives you a score of 1 + 3 = 4. #\n# In the second round, your opponent will choose Paper (B), and you choose Rock so you lose (X) with a score of #\n# 1 + 0 = 1. 
#\n# In the third round, you will defeat your opponent's Scissors with Rock for a score of 1 + 6 = 7. #\n# Now that you're correctly decrypting the ultra top secret strategy guide, you would get a total score of 12. #\n# #\n# Following the Elf's instructions for the second column, what would your total score be if everything goes #\n# exactly according to your strategy guide? #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n# load sample data, copied and pasted from the site into list. Each list item is one line of input\nmyset = ['A Y\\n','B X\\n','C Z\\n',]\n\n# once the test data provides the right answer: replace test data with data from the puzzle input\nmyset = get_data(day=2, year=2022).splitlines()\n# remove line feeds from the list\nfor x in range(0,len(myset)):\n myset[x] = myset[x].strip()\n# pprint(myset)\n# get the time we start running our solution: even though I'm running in debug mode\nstartime = time.time()\n\ntotal_score = 0\n\n# Dict to convert ABC, XYZ to Rock Paper Scissor.\nrps = {\n 'A': 'R',\n 'B': 'P',\n 'C': 'S',\n 'X': 'R',\n 'Y': 'P',\n 'Z': 'S',\n}\n\n# Dict for the score if you used R, P, or S\nscore = {\n 'R': 1,\n 'P': 2,\n 'S': 3,\n}\n\n# loop through the games to determine score if X Y Z is played as R, P, S\nfor game in myset:\n opp, me = game.split(' ')\n opp = rps[opp]\n me = rps[me]\n #print(f'Opponent: {opp} - Me: {me}')\n # score for the roll I made\n total_score += score[me]\n\n # Used if statements because I could not quickly figure out how to make match do what I wanted.\n if opp == me: #draw score 3 points\n total_score += 3\n elif opp == 'R' and me == 'P': # I win, score 6 points\n total_score += 6\n elif opp == 'P' and me == 'S': # I win, score 6 points\n total_score += 6\n elif opp == 'S' and me == 'R': # I win, score 6 points\n total_score += 6\n # I do not need to do losses because 0 points does not need to be added\n # I could have put the elif all on one line, but left is separate for readability\nprint(f'Part 1: Me score: {total_score}. {time.time() - startime}')\n\n# part 2\nstartime = time.time()\ntotal_score = 0\n\n# loop through the games using X, Y, Z, as Win, Lie, Lose.\nfor game in myset:\n opp, me = game.split(' ')\n opp = rps[opp]\n if me == 'Y': # must be draw\n total_score += 3 # for the draw\n total_score += score[opp] # the score for my play ( since it is a draw, my play is the same as the opp)\n elif me == 'X': # I need to loose\n if opp == 'R': # get the score for playing S\n total_score += score['S']\n elif opp == 'P': # get the score for playing R\n total_score += score['R']\n elif opp == 'S': # I get to score for playing P\n total_score += score['P']\n elif me == 'Z': # I need to win\n total_score += 6\n if opp == 'R': # get the score for playing P\n total_score += score['P']\n elif opp == 'P': # get the score for playing S\n total_score += score['S']\n elif opp == 'S': # I get to score for playing R\n total_score += score['R']\nprint(f'Part 2: Me score: {total_score}. 
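Both if/elif chains in this solution can be replaced with lookup tables. A sketch that scores one round per part with dicts; same rules, different expression:

```python
SHAPE_SCORE = {"R": 1, "P": 2, "S": 3}
BEATS = {"R": "S", "P": "R", "S": "P"}           # key defeats value
LOSES_TO = {v: k for k, v in BEATS.items()}      # key is defeated by value

def score_part1(opp, me):
    outcome = 3 if me == opp else (6 if BEATS[me] == opp else 0)
    return SHAPE_SCORE[me] + outcome

def score_part2(opp, goal):                      # X lose, Y draw, Z win
    me = {"X": BEATS[opp], "Y": opp, "Z": LOSES_TO[opp]}[goal]
    return score_part1(opp, me)

assert score_part1("R", "P") == 8 and score_part2("R", "Y") == 4
```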
{time.time() - startime}')\n","repo_name":"azmcnutt/AOC-Python","sub_path":"2022/Day2.py","file_name":"Day2.py","file_ext":"py","file_size_in_byte":10154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25794309876","text":"from src import __globals__\nfrom src import run_comm as comm\n\n\nclass SolidColour():\n def __init__(self, message, out_type=None, *args, **kwargs):\n self.out_type = out_type\n self.getColor()\n\n def getColor(self):\n self.color = None\n __globals__.colour_picker.colorSelected.connect(self.updateColor)\n __globals__.colour_picker.exec()\n if self.color is not None:\n self.color = list(self.color)\n self.message = f'{self.color[0]},{self.color[1]},{self.color[2]}'\n comm.run('write', f'solidcolor,{self.message}', self.out_type)\n\n def updateColor(self):\n self.color = __globals__.colour_picker.currentColor().getRgb()\n\n @staticmethod\n def effectData():\n effect_name = 'Solid Colour'\n return effect_name\n","repo_name":"milan338/FreeRGB","sub_path":"src/ui/effects/effect/SolidColour.py","file_name":"SolidColour.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4248849201","text":"# Try1 - ✅\nn, m = map(int, input().split())\n# n번까지의 구슬 중 m개를 뽑아 일열로 나열하는 경우의 수\n\nres = [0]*m\ncheck = [0]*n\n\ndef DFS(L):\n if L==m:\n print(res)\n return\n for i in range(n):\n if check[i] == 0:\n check[i]=1\n res[L] = i+1\n DFS(L+1)\n check[i]=0\n\nDFS(0)\n","repo_name":"HyunlangBan/inflean_python_algorithm","sub_path":"section_6/순열구하기/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2619919695","text":"#!/usr/bin/env python\n\nfrom argparse import ArgumentParser\nfrom Bio import Phylo\n\n\ndef main(args):\n ranks = args.ranks\n tree = Phylo.read(args.tree, format=\"newick\")\n with open(args.outfile, 'w') as fhout:\n for t in tree.get_terminals():\n name = t.name\n taxnames = \";\".join(name.split(\"_\")[0:len(ranks)-1]+[\"_\".join(name.split(\"_\")[len(ranks)-2:])])\n fhout.write(f\"{name}\\t{taxnames}\\n\")\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"tree\", type=str,\n help=\"Input tree\")\n parser.add_argument(\"outfile\", type=str,\n help=\"Output taxon-file\")\n parser.add_argument(\"--ranks\", nargs=\"*\", default=[\"order\",\"family\",\"genus\",\"species\"],\n help=\"Ranks to extract\")\n args = parser.parse_args()\n main(args)","repo_name":"johnne/IBA-epa-ng","sub_path":"src/extract_ref_taxonomy.py","file_name":"extract_ref_taxonomy.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74140598343","text":"#from sklearn.linear_model import LogisticRegression\nfrom logistic_regression import LogisticRegression\nfrom metrics import accuracy\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\n\nbreast_cancer = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data\",\n names=[\"id\",\"diagnosis\",\"radius_mean\",\"texture_mean\",\"perimeter_mean\",\"area_mean\",\"smoothness_mean\",\"compactness_mean\",\"concavity_mean\",\"concave 
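The DFS with a `check` array enumerates the k-permutations of 1..n in the same order `itertools.permutations` produces for sorted input, so the standard-library version is a drop-in sketch:

```python
from itertools import permutations

n, m = 3, 2
for perm in permutations(range(1, n + 1), m):
    print(list(perm))  # [1, 2], [1, 3], [2, 1], [2, 3], [3, 1], [3, 2]
```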
points_mean\",\"symmetry_mean\",\"fractal_dimension_mean\",\"radius_se\",\"texture_se\",\"perimeter_se\",\"area_se\",\"smoothness_se\",\"compactness_se\",\"concavity_se\",\"concave points_se\",\"symmetry_se\",\"fractal_dimension_se\",\"radius_worst\",\"texture_worst\",\"perimeter_worst\",\"area_worst\",\"smoothness_worst\",\"compactness_worst\",\"concavity_worst\",\"concave points_worst\",\"symmetry_worst\",\"fractal_dimension_worst\"])\n\nX = breast_cancer.drop(\"diagnosis\", axis=1).values\nY = breast_cancer['diagnosis'].values\n\nX_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size=0.3, random_state=0)\n\nle = LabelEncoder()\nY_train = le.fit_transform(Y_train)\nY_test = le.transform(Y_test)\nnp.unique(Y_test)\n\nss = StandardScaler()\nX_train = ss.fit_transform(X_train)\nX_test = ss.transform(X_test)\n\nlr = LogisticRegression()\nlr.fit(X_train, Y_train)\nprint(accuracy(Y_test, lr.predict(X_test)))\n\nclass LogisticRegressionGD(object):\n \"\"\"Logistic Regression Classifier using gradient descent.\n\n Parameters\n ------------\n eta : float\n Learning rate (between 0.0 and 1.0)\n n_iter : int\n Passes over the training dataset.\n random_state : int\n Random number generator seed for random weight\n initialization.\n\n\n Attributes\n -----------\n w_ : 1d-array\n Weights after fitting.\n cost_ : list\n Logistic cost function value in each epoch.\n\n \"\"\"\n def __init__(self, eta=0.05, n_iter=100, random_state=1):\n self.eta = eta\n self.n_iter = n_iter\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\" Fit training data.\n\n Parameters\n ----------\n X : {array-like}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n self : object\n\n \"\"\"\n self.w_ = np.zeros(X.shape[1]+1)\n\n for i in range(self.n_iter):\n net_input = self.net_input(X)\n output = self.activation(net_input)\n errors = (y - output)\n self.w_[1:] += self.eta * X.T.dot(errors)\n self.w_[0] += self.eta * errors.sum()\n\n return self\n\n def net_input(self, X):\n \"\"\"Calculate net input\"\"\"\n return np.dot(X, self.w_[1:]) + self.w_[0]\n\n def activation(self, z):\n \"\"\"Compute logistic sigmoid activation\"\"\"\n return 1. / (1. 
+ np.exp(-np.clip(z, -250, 250)))\n\n def predict(self, X):\n \"\"\"Return class label after unit step\"\"\"\n return np.where(self.net_input(X) >= 0.0, 1, 0)\n # equivalent to:\n # return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)\n\n\nlr = LogisticRegressionGD()\nlr.fit(X_train, Y_train)\nprint(accuracy(Y_test, lr.predict(X_test)))\n","repo_name":"gfgullo/scratchML","sub_path":"classification_test.py","file_name":"classification_test.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"29824465271","text":"from tensorflow.keras.utils import plot_model\n\nfrom .games import create_game\nfrom .utils import create_parser, parse_args\n\n\ndef main():\n # parse args\n parser = create_parser()\n parser.add_argument('--to_file', type=str, default='model.png')\n\n (args, cfg, agent_cls, model_cls) = parse_args(parser)\n cfg_game = cfg.get('game', {})\n cfg_game['display'] = False\n cfg_game['episodes'] = 0\n cfg_game['episode_start'] = 0\n cfg_game['load_weights'] = False\n game = create_game(args.game, cfg, agent_cls, model_cls)\n print(f'write model file to {args.to_file}')\n plot_model(game.agent.model, to_file=args.to_file, show_shapes=True, show_layer_names=True)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"smallRockcat/RL_DDQN","sub_path":"rl-master/rl/plot_model.py","file_name":"plot_model.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37226258511","text":"#\n# @lc app=leetcode.cn id=96 lang=python3\n#\n# [96] 不同的二叉搜索树\n#\n\n# @lc code=start\nclass Solution:\n def numTrees(self, n: int) -> int:\n if n<2:\n return 1\n dp = [0 for i in range(n+1)]\n dp[0] = 1 \n dp[1] = 1\n dp[2] = 2\n for i in range(3,n+1):\n #root结点的取值[1,i],j是当前root结点取值\n #左子树的所有结点范围是[1,j-1] j-1])\", data[prop]['syntax'])\n if keywords:\n expr = r'(?:%s){{identifier_end}}' % r'|'.join(keywords)\n value_scope = [\n rule(\n match=expr,\n scope='support.constant.property-value.css',\n ),\n rule(include='property-value'),\n ]\n\n return rule(\n match=prop+r'{{identifier_end}}',\n scope='meta.property-name.css support.type.property-name.css',\n push=[ [rule(include='immediately-pop')], 'property-meta', value_scope, 'property-colon' ]\n )\n\n return list(map(property_rule, data))\n","repo_name":"Thom1729/CSS-Future","sub_path":"src/properties.py","file_name":"properties.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"24899564764","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Willkommen zum Ordnungsrechner \n# ## Der Ordnungsrechner berechnet die Ordnung eines bestimmten Elements in einer Gruppe mit der Multiplikation und/oder Addition als Verknüpfung!\n\n\n# Initialisierung\n\nmodulo = None\nelement = None\nVerknüpfung = None\n\n\n# Fehlerabfrage und Eingabe\n\nwhile(type(modulo) != int):\n try:\n print(\"\\nBitte geben Sie eine gültige Ganzzahl ein.\")\n modulo = int(input(\"Modulo n der Restklasse: \"))\n except Exception as e:\n print(\"\\nFehlercode: \", e)\n\nwhile(type(element) != int):\n try:\n print(\"\\nBitte geben Sie eine gültige Ganzzahl ein.\")\n element = int(input(\"Element g, deren Ordnung bestimmt werden muss: \"))\n except Exception as e:\n print(\"\\nFehlercode: \", e)\n\nwhile(Verknüpfung != \"+\" and Verknüpfung != \"*\" and Verknüpfung != \"Beide\"):\n try:\n 
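The `numTrees` solution above is truncated mid-loop. For reference, the standard Catalan-number dynamic program its comments describe (choosing root j leaves j-1 nodes in the left subtree and i-j in the right) looks like this; a reconstruction for illustration, not the original file:

```python
def num_trees(n: int) -> int:
    # dp[i] = number of BSTs on i nodes; picking root j leaves j-1 nodes on
    # the left and i-j on the right, so dp[i] = sum(dp[j-1] * dp[i-j]).
    dp = [1] + [0] * n
    for i in range(1, n + 1):
        for j in range(1, i + 1):
            dp[i] += dp[j - 1] * dp[i - j]
    return dp[n]

assert num_trees(3) == 5
```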
print(\"\\nBitte geben Sie eine gültige Verknüpfung (+, *, Beide) ein.\")\n Verknüpfung = input(\"Verknüpfung, welche in der Gruppe herrscht: \")\n except Exception as e:\n print(\"\\nFehlercode: \", e)\n\n\n# ### Multiplikative Verknüpfung:\n# ##### Teileranzahl und Teiler von n-1 bestimmen:\n\n\n\nif(Verknüpfung == \"*\" or Verknüpfung == \"Beide\"):\n \n \n teileranzahl = 0\n teiler = []\n zaehler = 0\n\n\n\nif(Verknüpfung == \"*\" or Verknüpfung == \"Beide\"):\n\n\n for zaehler in range(1, int(((modulo-1)/2)+1)): # Die for-Schleife findet alle echten Teiler von n-1 und die 1\n if((modulo-1)%zaehler == 0):\n teileranzahl = teileranzahl + 1; # Weiterer Teiler wurde gefunden\n teiler.append(zaehler) #Teiler wird zur Teilerliste hinzugefügt\n #print(\"Teileranzahl: \", teileranzahl)\n #print(\"Die Teiler sind: \", teiler)\n\n # Dieser Abschnitt fügt noch den unechten Teiler n-1 zu den Teilern hinzu\n teileranzahl = teileranzahl + 1\n teiler.append((modulo-1))\n\n print(\"Teileranzahl: \", teileranzahl)\n print(\"Die Teiler sind: \", teiler)\n\n\n# ##### Nun wird nach der Ordnung des Elementes g geschaut:\n\n\n\nif(Verknüpfung == \"*\" or Verknüpfung == \"Beide\"):\n\n\n index = 0\n ordnung = None\n ergebnis = None\n\n\n\n\nif(Verknüpfung == \"*\" or Verknüpfung == \"Beide\"):\n\n\n for index in range(teileranzahl):\n ergebnis = (element**teiler[index])%modulo\n if(ergebnis == 1):\n ordnung = teiler[index]\n break\n \n if(ordnung == None): # Abfangabfrage, wenn sich die Ordnung des Elements nicht berechnen lässt, da z.b. die Eingabe falsch war oder die Ordnung nicht errechenbar ist in dem Körper\n print(\"Bei der Berechnung der Ordnung ist entweder ein Fehler unterlaufen oder es kann keine Ordnung für dieses Element in dem Körper mit einer multiplikativen Verknüpfung berechnet werden. Überprüfen Sie ihre Eingabe oder kontaktieren Sie den Hersteller\")\n else:\n print(\"Die Ordnung des Elements \", element, \" in der Restklasse \", modulo, \" ist: \", ordnung)\n\n\n# ### Additive Verknüpfung:\n\n\n\nif(Verknüpfung == \"+\" or Verknüpfung == \"Beide\"):\n\n\n ordnung = None\n ergebnis = None\n\n\n\nif(Verknüpfung == \"+\" or Verknüpfung == \"Beide\"):\n\n\n for ordnung in range(1, modulo):\n ergebnis = (element*ordnung)%modulo\n if(ergebnis == 0):\n #print(ordnung)\n break\n\n\n if(ordnung == None): # Abfangabfrage, wenn sich die Ordnung des Elements nicht berechnen lässt, da z.b. die Eingabe falsch war oder die Ordnung nicht errechenbar ist in dem Körper\n print(\"Bei der Berechnung der Ordnung ist entweder ein Fehler unterlaufen oder es kann keine Ordnung für dieses Element in dem Körper mit einer multiplikativen Verknüpfung berechnet werden. 
Überprüfen Sie ihre Eingabe oder kontaktieren Sie den Ersteller\")\n elif(ergebnis != 0):\n print(\"Das Element \", element, \" in der Restklasse \", modulo, \" hat keine Ordnung\")\n else:\n print(\"Die Ordnung des Elements \", element, \" in der Restklasse \", modulo, \" ist: \", ordnung)","repo_name":"KxroShinigami/Ordnungsrechner-Multiplikation-Addition","sub_path":"Ordnungsrechner Restklasse Addition und Multiplikation, Kryptologie, Sem 2, Übung 3, 26.04.2022.py","file_name":"Ordnungsrechner Restklasse Addition und Multiplikation, Kryptologie, Sem 2, Übung 3, 26.04.2022.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27113507393","text":"# 리스트로 스택 선언\nstack = []\n\n# 스택에 데이터 push\nstack.append(1)\nstack.append(2)\nstack.append(3)\n\nprint(stack)\n\n# 스택에서 데이터 pop\nstack.pop()\n\nprint(stack)\n\nfrom collections import deque\n\n# 큐 선언\nqueue = deque()\n\n# 큐에 데이터 enqueue\nqueue.append(1)\nqueue.append(2)\nqueue.append(3)\nqueue.append(4)\nqueue.append(5)\n\nprint(queue)\n\n# 큐에서 데이터 dequeue\ndata = queue.popleft()\n\nprint(queue)\n\nimport heapq\n\n# 힙 선언 (기본은 최소힙)\nheap = []\n\n# 힙에 데이터 push\nvalues = [2,5,6,1,3,7,4]\nfor value in values:\n heapq.heappush(heap, value)\n\nprint(heap)\n\n# 최소값 얻기\ndata = heapq.heappop(heap)\n\nprint(data)\nprint(heap)\n\n# 최대힙 만들기\n# 힙 선언\nheap = []\n\n# 힙에 데이터 push\nvalues = [2,5,6,1,3,7,4]\n# 부호를 변경하여 push\nfor value in values:\n heapq.heappush(heap, -value)\n\nprint(heap)\n\n# 최대값 얻기\n# 부호를 다시 변경하여 원래 숫자 얻기\ndata = -heapq.heappop(heap)\n\nprint(data)\nprint(heap)","repo_name":"HaneulJung/Programmers","sub_path":"Programmers/Lv. 2/자료구조.py","file_name":"자료구조.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30219685448","text":"# This is for solving mod 37\n\n# open and read file (as string)\nf = open(\"message.txt\",\"r\")\nf = f.read()\n\n# split by space\nchars = f.split(\" \")\n\n# make them integers\nchars_int = []\nfor i in chars:\n\tchars_int.append(int(i))\n\nmsg = \"\"\n\nfor i in chars_int:\n\n\t# mod 37 all\n\tmod = i % 37\n\n\t# if 0-25, it's uppercase alphabet\n\tif mod < 26:\n\t\tmod += 65\n\t\tmsg += chr(mod)\n\n\t# if 26-35, it's number 0-9\n\telif 25 < mod < 36:\n\t\tmod -= 26\n\t\tmsg += str(mod)\n\n\t# if 36, it's underscore\n\telif mod == 36:\n\t\tmsg += \"_\"\n\n\nprint('picoCTF{' + msg + '}')\n","repo_name":"ghifarazka/PicoCTF2022-writeup","sub_path":"cryptography/basic-mod-1/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25618284521","text":"import tensorflow as tf\n\n\"\"\"\nonly used for my implementation of Googlenet\n\"\"\"\n\nglobal batch_count\nglobal bias_count\ndef set_batch_count():\n global batch_count\n global bias_count\n batch_count = 0\n bias_count = 0\n\n# constructs a convolutional of the arguments. 
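The mod-37 decoder's three branches map residues 0-25 to A-Z, 26-35 to 0-9, and 36 to an underscore; the whole mapping fits in one 37-character lookup string. An equivalent sketch with made-up sample input:

```python
import string

ALPHABET = string.ascii_uppercase + string.digits + "_"  # 26 + 10 + 1 = 37

def decode_mod37(numbers):
    return "".join(ALPHABET[n % 37] for n in numbers)

print("picoCTF{" + decode_mod37([350, 63, 353]) + "}")  # sample input -> R0U
```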
Just makes implement a bit easier\n# stride defaults to 1 and padding defaults to SAME\ndef conv2d(convInput,\n numFilters,\n shape=[3,3],\n stride=(1,1),\n padding='SAME',\n name=None):\n global batch_count\n batch_count += 1\n return tf.nn.elu(_batchNormalize(tf.layers.conv2d(\n inputs = convInput,\n filters = numFilters, # the number of filters/output size\n kernel_size = shape, # the size of the conv window\n strides = stride,\n padding = padding,\n activation = None, # None here allows me to do it after BN.\n kernel_regularizer = None,\n name=name), name=('batch'+str(batch_count))\n ))\n\n# constructs a pooling layer of the arguments.\ndef maxPool(poolInput, size=[2,2], stride=[2,2], padding='SAME', name=None):\n return tf.nn.max_pool(\n poolInput,\n ksize = [1] + size + [1],\n strides = [1] + stride + [1],\n padding = padding,\n name=name\n )\n\ndef zeroPad(padInput, height=1, width=1, mode='CONSTANT'):\n return tf.pad(padInput,\n paddings=[[0,0],[height,height],[width,width],[0,0]],\n mode=mode)\n\n\n# function to make inception unit.\ndef inceptUnit(inceptInput, filters):\n i_1x1 = conv2d(inceptInput, filters[0], shape=[1,1])\n\n i_3x3_reduce = conv2d(inceptInput, filters[1], shape=[1,1])\n\n i_3x3 = conv2d(i_3x3_reduce, filters[2], shape=[3,3])\n\n i_5x5_reduce = conv2d(inceptInput, filters[3], shape=[1,1])\n\n i_5x5_1 = conv2d(i_5x5_reduce, filters[4], shape=[3,3])\n\n i_5x5_2 = conv2d(i_5x5_1, filters[5], shape=[3,3])\n\n pool = maxPool(inceptInput, size=[3,3], stride=[1,1])\n\n pool_proj = conv2d(pool, filters[6], shape=[1,1])\n\n return tf.concat((i_1x1, i_3x3, i_5x5_2, pool_proj), axis=-1)\n\n# returns a batch normalization layer.\ndef _batchNormalize(normInput, name=None):\n global batch_count\n batch_count += 1\n return tf.layers.batch_normalization(\n inputs=normInput,\n name=name)\n\ndef dense(denseInput, size, name=None):\n global batch_count\n batch_count += 1\n return _batchNormalize(\n tf.layers.dense(\n inputs=denseInput,\n units=size,\n name=name,\n use_bias=False\n ),\n name=('batch'+str(batch_count))\n )\n\n# constructs a bias variable.\ndef biasVariable(shape):\n initial = tf.constant(.1, shape=shape)\n return tf.Variable(initial)\n\n# returns random weights. Probably not needed and bad initialization method.\n# however, batch normalization makes it less important...\ndef weightVariable(shape):\n initial = tf.truncated_normal(shape, stddev=.1)\n return tf.Variable(initial)\n","repo_name":"arthurfeeney/PoseNetwork","sub_path":"src/custom_helper.py","file_name":"custom_helper.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17940653211","text":"# -*- coding: utf-8 -*-\n\"\"\"\n碩博士論文爬蟲\n1. 從thesis_crawler_log資料表篩選欲爬取論文的PI紀錄資料\n → 預設篩選條件:最後爬蟲日期(lastCrawledDate)小於目前日期(crawlDate)的資料\n2. 利用PI姓名碩博士於論文網搜尋並取得論文資料,存入thesis_rawdata資料表\n3. 
同步更新thesis_crawler_log資料表中,PI的論文數與爬蟲日期紀錄\n\"\"\"\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nimport re\nimport logging\nimport time\nimport os\n#import pymysql\n\n# database connection\ntoolspath = r\"C:\\成大專案\\學研專家網絡\\資料與繪圖\\模組與資料表\" \nos.chdir(toolspath)\nimport toolmodules as tools\ndatabase = \"sna_network\"\nconn = tools.dbConnect(database)\ncur = conn.cursor()\n\n# 建立thesis_rawdata資料表\n#import thesisTables as t_table\n#t_table.thesisCrawlerLogTable(database)\n#t_table.thesisRawdataTable(database)\n\nstartTime = time.time()\n\n# 引入 logging 配置\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n# 爬蟲執行日期\ncrawlDate = time.strftime(\"%Y/%m/%d\")\n\n# 計算當前學年度\n#year, month, day, hour, min = map(int, time.strftime(\"%Y %m %d %H %M\").split())\n#if (month < 7):\n# academicYear = year - 1912\n#else:\n# academicYear = year - 1911\n\n\n#讓ChromeDriver中不顯示“正受到自動測試軟體控制” \nchrome_options = webdriver.ChromeOptions(); \nchrome_options.add_experimental_option(\"excludeSwitches\", ['enable-automation']);\n#driver = webdriver.Chrome()\nuser_agent = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36\"\nchrome_options.add_argument('--user-agent=%s' % user_agent)\ndriver = webdriver.Chrome(r\"chromedriver.exe\", options=chrome_options);\n\n# 爬蟲funtion\ndef crawlContent(professorName, thesesCrawled):\n driver.get(\"https://ndltd.ncl.edu.tw/cgi-bin/gs32/gsweb.cgi/ccd=.9yyI2/search?mode=basic\")\n try:\n xpath = '//*[@id=\"indexsearch\"]/table/tbody/tr[1]/td/span/a[2]'\n driver.find_element_by_xpath(xpath).click()\n except:\n time.sleep(2)\n# driver.find_element_by_css_selector(\"span.schfunc a[title='指令查詢']\").click()\n driver.find_element_by_link_text(u\"指令查詢\").click()\n time.sleep(1)\n driver.find_element_by_id(\"ysearchinput0\").click()\n driver.find_element_by_id(\"ysearchinput0\").clear()\n \n #以教授名稱查詢\n driver.find_element_by_id(\"ysearchinput0\").send_keys(u\"\\\"\" + professorName + \"\\\".ad\")\n time.sleep(1)\n driver.find_element_by_id(\"gs32search\").click()\n time.sleep(1)\n \n paperAmount = driver.find_element_by_xpath(\n \"//*[@id='bodyid']/form/div/table/tbody/tr[1]/td[2]/table/tbody/tr[4]/td/div[1]/table/tbody/tr[2]/td/table[2]/tbody/tr[2]/td[2]/span[2]\").text\n paperAmount = int(paperAmount.lstrip().rstrip()) # 檢索結果的資料筆數\n cur.execute('UPDATE `thesis_crawler_log` set thesesAmount=%s where name=%s', (paperAmount, professorName))\n conn.commit()\n\n if (paperAmount == 0) or (thesesCrawled >= paperAmount):\n return\n \n try:\n driver.find_element_by_name(\"sortby\").click()\n Select(driver.find_element_by_name(\"sortby\")).select_by_visible_text(u\"畢業學年度(遞增)\")\n driver.find_element_by_name(\"sortby\").click()\n time.sleep(1)\n except Exception as e:\n print(e)\n pass\n \n xpath = \"//table[@id='tablefmt1']/tbody/tr[2]/td[3]/div/div/table/tbody/tr/td/a/span\"\n driver.find_element_by_xpath(xpath).click()\n \n if thesesCrawled != 0:\n startPage = thesesCrawled+1\n driver.find_element_by_name(\"jmpage\").click()\n driver.find_element_by_name(\"jmpage\").clear()\n driver.find_element_by_name(\"jmpage\").send_keys(startPage)\n driver.find_element_by_name(\"jumpfmt0page\").click()\n time.sleep(2)\n else:\n startPage = 1\n \n\n for j in range(startPage+1, paperAmount + 2):\n try:\n li = driver.find_elements_by_xpath('//*[@id=\"gs32_levelrecord\"]/ul/li')\n URL = 'null'\n studentName_ch = 'null'\n studentName_en = 'null'\n thesisName_ch = 'null'\n 
thesisName_en = 'null'\n professorName_ch = 'null'\n professorName_en = 'null'\n oralTestCommitteeName_ch = 'null'\n oralTestCommitteeName_en = 'null'\n oralTestDate = 'null'\n degreeType = 'null'\n schoolName = 'null'\n departmentName = 'null'\n discipline = 'null'\n educationType = 'null'\n publishYear = 'null'\n graduationYear = 'null'\n languageType = 'null'\n pageCount = 'null'\n Tkeyword_ch = 'null'\n Tkeyword_en = 'null'\n Tabstract_ch = 'null'\n Tabstract_en = 'null'\n tableOfContents = 'null'\n refs = 'null'\n\n for i in range(0, len(li) - 1):\n if (li[i].text == \"論文基本資料\"):\n li[i].click()\n URL = str(driver.find_element_by_xpath('//*[@id=\"fe_text1\"]').get_attribute('value'))\n tableList = driver.find_element_by_xpath('//*[@id=\"gs32_levelrecord\"]/div').text.splitlines()\n # 做字串處理\n for data in tableList:\n if ('研究生:' in data):\n studentName_ch = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('研究生(外文):' in data):\n studentName_en = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('論文名稱:' in data):\n thesisName_ch = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('論文名稱(外文):' in data):\n thesisName_en = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('指導教授:' in data):\n professorName_ch = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('指導教授(外文):' in data):\n professorName_en = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('口試委員:' in data):\n oralTestCommitteeName_ch = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('口試委員(外文):' in data):\n oralTestCommitteeName_en = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('口試日期:' in data):\n oralTestDate = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('學位類別:' in data):\n degreeType = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('校院名稱:' in data):\n schoolName = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('系所名稱:' in data):\n departmentName = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('學門:' in data):\n discipline = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('學類:' in data):\n educationType = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('論文出版年:' in data):\n publishYear = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('畢業學年度:' in data):\n graduationYear = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('語文別:' in data):\n languageType = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('論文頁數:' in data):\n pageCount = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('中文關鍵詞:' in data):\n Tkeyword_ch = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n if ('外文關鍵詞:' in data):\n Tkeyword_en = re.sub(\"\\\"\", \"`\", re.split('[: ]', data, maxsplit=1)[1]).lstrip()\n elif (li[i].text == \"摘要\"):\n li[i].click()\n Tabstract_ch = re.sub(\"\\\"\", \"`\",\n driver.find_element_by_xpath('//*[@id=\"gs32_levelrecord\"]/div').text).lstrip()\n elif (li[i].text == \"外文摘要\"):\n li[i].click()\n Tabstract_en = re.sub(\"\\\"\", \"`\",\n driver.find_element_by_xpath('//*[@id=\"gs32_levelrecord\"]/div').text).lstrip()\n elif (li[i].text == \"目次\"):\n li[i].click()\n tableOfContents = 
re.sub(\"\\\"\", \"`\", driver.find_element_by_xpath(\n '//*[@id=\"gs32_levelrecord\"]/div').text).lstrip()\n elif (li[i].text == \"參考文獻\"):\n li[i].click()\n refs = re.sub(\"\\\"\", \"`\",\n driver.find_element_by_xpath('//*[@id=\"gs32_levelrecord\"]/div').text).lstrip()\n else:\n pass\n\n insertThesesInfo = '''INSERT IGNORE INTO `thesis_rawdata` (URL, studentName_ch, studentName_en, thesisName_ch, thesisName_en,\\\n advisor_ch, advisor_en, oralTestCommittee_ch,\\\n oralTestCommittee_en, oralTestDate, degreeType, schoolName,\\\n departmentName, discipline, educationType, publishYear,\\\n graduationYear, languageType, pageCount, Tkeyword_ch, Tkeyword_en,\\\n Tabstract_ch, Tabstract_en, tableOfContents, refs)\\\n VALUES \\\n (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")''' % \\\n (URL, studentName_ch, studentName_en, thesisName_ch, thesisName_en, professorName_ch,\n professorName_en, oralTestCommitteeName_ch,\n oralTestCommitteeName_en, oralTestDate, degreeType, schoolName, departmentName,\n discipline, educationType, publishYear,\n graduationYear, languageType, pageCount, Tkeyword_ch, Tkeyword_en, Tabstract_ch,\n Tabstract_en, tableOfContents, refs)\n updateCrawl_log = '''UPDATE `thesis_crawler_log` SET thesesCrawled=%s WHERE name=\"%s\"''' % (j-1, professorName)\n\n logging.info('教授:{},{}/{},論文:{}'.format(professorName, (j-1), paperAmount, thesisName_ch))\n try:\n cur.execute(insertThesesInfo)\n conn.commit()\n cur.execute(updateCrawl_log)\n conn.commit() \n except Exception as e:\n print(e)\n pass\n\n driver.find_element_by_name(\"jmpage\").click()\n driver.find_element_by_name(\"jmpage\").clear()\n driver.find_element_by_name(\"jmpage\").send_keys(j)\n driver.find_element_by_name(\"jumpfmt0page\").click()\n time.sleep(2)\n\n except Exception as e:\n print(e)\n pass\n\n\n# 以日期query需爬蟲資料,執行爬蟲,更新爬蟲紀錄\nwith conn.cursor() as cursor:\n # 尚未下載過論文的PI & 下載論文數不完整的PI \n selectCommand =\\\n f\"\"\"SELECT name, lastCrawledDate, thesesAmount, thesesCrawled \n FROM `thesis_crawler_log`\n WHERE (lastCrawledDate < '{crawlDate}')\"\"\"\n \n cursor.execute(selectCommand)\n results = cursor.fetchall()\n for row in results:\n professorName = row[0]\n # Crawl all data\n\n crawlContent(professorName, row[-1])\n \n # 爬完這個教授的所有論文資料後更新爬蟲日期\n cur.execute(\"UPDATE `thesis_crawler_log` SET lastCrawledDate=%s WHERE name=%s\", (crawlDate, professorName))\n conn.commit()\n \nendtTime = time.time()\ntakeTime = endtTime-startTime\nprint(takeTime)\n\n \ndriver.quit()\nconn.close() \n","repo_name":"tsaijou/sna_network","sub_path":"project_code/thesesCrawler.py","file_name":"thesesCrawler.py","file_ext":"py","file_size_in_byte":12792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26701388810","text":"def gpa(marks):\n if marks >= 80:\n points = 5.00\n elif marks >= 70:\n points = 4.00\n elif marks >= 60:\n points = 3.50\n elif marks >= 50:\n points = 3.00\n elif marks >= 40:\n points = 2.00\n elif marks >= 33:\n points = 1.00\n else:\n points = 0.00\n return points\n\n\nsubject1 = int(input(\"Enter the obtained marks of subject-1: \"))\nsubject2 = int(input(\"Enter the obtained marks of subject-2: \"))\nsubject3 = int(input(\"Enter the obtained marks of subject-3: \"))\n\nif gpa(subject1) == 0 or gpa(subject2) == 0 or gpa(subject3) == 0:\n print(\"\\nResult Failed \\nPoints: 0.00\")\nelse:\n avg_points = 
(gpa(subject1)+gpa(subject2)+gpa(subject3)) / 3\n print(\"\\nResult Passed \\nPoints:\", round(avg_points, 2))","repo_name":"naiemofficial/Mathematics-with-programming","sub_path":"Python/Maths/GPA calculation by 3 number of subjects marks (variable).py","file_name":"GPA calculation by 3 number of subjects marks (variable).py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12359661706","text":"from logo import art\nprint(art)\nchoose = str(input(\"type 'e' to encrypt and 'd' to decrypt: \")).lower()\nif choose == \"e\":\n alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n txt = str(input(\"Type your message here\\n\"))\n shift = int(input(\"Type shift amount\"))\n def encrypt(a,b):\n emp_str = \"\"\n for letter in a:\n position = alphabet.index(letter)\n new_shift = (position + b) % 26 # wrap past 'z' back to 'a' instead of raising IndexError\n new_letter = alphabet[new_shift]\n emp_str += new_letter\n print(f\"encrypted message = {emp_str}\")\n encrypt(a=txt,b=shift)\nelse:\n alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n e_txt = str(input(\"Type your message here\\n\"))\n shifted_amount = int(input(\"Enter the shift amount: \"))\n def decrypt(x,y):\n msg = '' # renamed from 'str' to avoid shadowing the built-in\n for l in x:\n position = alphabet.index(l)\n new_position = (position - y) % 26 # wrap around the alphabet\n np = alphabet[new_position]\n msg += np\n print(f\"decrypted message: {msg}\")\n decrypt(x=e_txt,y=shifted_amount)\n","repo_name":"PradyBoi/Hactober_2022","sub_path":"Caesar_Cipher.py","file_name":"Caesar_Cipher.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12510950164","text":"import os\nimport numpy as np\nimport pandas as pd\nimport cv2\nfrom tqdm import tqdm\n\nimport matplotlib.pyplot as plt\n#%matplotlib inline\n\n# 指定数据集路径\ndataset_path = 'fruit81_full'\nos.chdir(dataset_path)\nos.listdir()\n\n## 统计图像尺寸\ndf = []\nfor fruit in tqdm(os.listdir()): # 遍历每个类别\n os.chdir(fruit)\n for file in os.listdir(): # 遍历每张图像\n try:\n img = cv2.imread(file)\n df.append({'类别':fruit, '文件名':file, '图像宽':img.shape[1], '图像高':img.shape[0]})\n except:\n print(os.path.join(fruit, file), '读取错误')\n os.chdir('../')\nos.chdir('../')\n\ndf = pd.DataFrame(df)\nprint(df)\n\n\n\n## 可视化图像尺寸分布\nfrom scipy.stats import gaussian_kde\nfrom matplotlib.colors import LogNorm\n\nx = df['图像宽']\ny = df['图像高']\n\nxy = np.vstack([x,y])\nz = gaussian_kde(xy)(xy)\n\n# Sort the points by density, so that the densest points are plotted last\nidx = z.argsort()\nx, y, z = x[idx], y[idx], z[idx]\n\nplt.figure(figsize=(10,10))\n# plt.figure(figsize=(12,12))\nplt.scatter(x, y, c=z, s=5, cmap='Spectral_r')\n# plt.colorbar()\n# plt.xticks([])\n# plt.yticks([])\n\nplt.tick_params(labelsize=15)\n\nxy_max = max(max(df['图像宽']), max(df['图像高']))\nplt.xlim(xmin=0, xmax=xy_max)\nplt.ylim(ymin=0, ymax=xy_max)\n\nplt.ylabel('height', fontsize=25)\nplt.xlabel('width', fontsize=25)\n\nplt.savefig('图像尺寸分布.pdf', dpi=120, bbox_inches='tight')\n\nplt.show()\n\n","repo_name":"aspiriner/ai04","sub_path":"CheckIMG.py","file_name":"CheckIMG.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3699416613","text":"from pyspark import SQLContext\nfrom pyspark.sql import SparkSession\n
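# CountVectorizer builds the bag-of-words term counts and StopWordsRemover strips common English stop words before LDA\nfrom pyspark.ml.feature 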
import CountVectorizer, StopWordsRemover\nimport numpy as np\nimport math\nimport random\nimport time\nimport pyLDAvis\n\nnp.random.seed(1234)\nrandom.seed(1234)\n\nc_k = None\nc_k_n = None\nV = None\n\n#Parameters\nalpha = 0.5\nbeta = 0.2\nK = 5\niterations = 1000\niterations_test = 200\nvocab_size = 182\nminDF = 0\nparam_update = 5\neval_every = 100\npartitions_no = 10\ntraining_ratio = 0.9\ndataset_dir = 'datasets/'\ndataset_file = 'abcnews-date-text_short.csv'\nvisualization = True\nvisualization_output_dir = 'results/vis/'\nresults_dir = \"results/\"\nresults = []\n\ndef init(row):\n z_doc = []\n c_k_doc = np.zeros(K)\n count = 0\n for idx, word_id in enumerate(row[1].indices):\n word_count = row[1].values[idx]\n topic = np.random.randint(0, K)\n z_doc.append((word_id, topic, word_count))\n c_k_doc[topic] += word_count\n count += word_count\n return row[0], z_doc, c_k_doc, count\n\n\ndef update_c_k_n(d):\n c = np.zeros((K, V), dtype=int)\n for item in d:\n word_id = int(item[0].split('_')[0])\n topic = int(item[0].split('_')[1])\n count = int(item[1])\n c[topic, word_id] += count\n return c\n\n\ndef update_c_k(d):\n c_n = np.zeros(K, dtype=int)\n for item in d:\n topic = int(item[0].split('_')[1])\n count = int(item[1])\n c_n[topic] += count\n return c_n\n\n\ndef get_c_k_m_x(z):\n matrix = np.zeros((len(z), K))\n for i in range(0, len(z)):\n matrix[i] = z[i][1]\n return matrix\n\n\ndef gibbs_sampling(z_m):\n c_k_m = z_m[2]\n c_k_local = c_k.value.copy()\n c_k_n_local = c_k_n.value.copy()\n\n for iteration in range(0, param_update):\n for idx, word_topic in enumerate(z_m[1]):\n word_id = word_topic[0]\n topic = word_topic[1]\n count = word_topic[2]\n c_k_local[topic] -= count\n c_k_n_local[topic][word_id] -= count\n c_k_m[topic] -= count\n\n p_z = np.zeros(K)\n for k in range(0, K):\n p_z[k] = ((c_k_m[k] + alpha) / (z_m[3] - count + (K * alpha))) * \\\n ((c_k_n_local[k][word_id] + beta) / (c_k_local[k] + (beta * V)))\n new_topic = np.random.multinomial(1, p_z / p_z.sum()).argmax()\n\n z_m[1][idx] = (word_id, new_topic, count)\n c_k_local[new_topic] += count\n c_k_n_local[new_topic][word_id] += count\n c_k_m[new_topic] += count\n\n return z_m[0], z_m[1], c_k_m, z_m[3]\n\n\ndef process_testset(z_m):\n c_k_m = z_m[2]\n c_k_local = c_k.value.copy()\n c_k_n_local = c_k_n.value.copy()\n\n for iteration in range(0, iterations_test):\n for idx, word_topic in enumerate(z_m[1]):\n word_id = word_topic[0]\n topic = word_topic[1]\n count = word_topic[2]\n c_k_m[topic] -= count\n\n p_z = np.zeros(K)\n for k in range(0, K):\n p_z[k] = ((c_k_m[k] + alpha) / (z_m[3] - count + (K * alpha))) * \\\n ((c_k_n_local[k][word_id] + beta) / (c_k_local[k] + (beta * V)))\n new_topic = np.random.multinomial(1, p_z / p_z.sum()).argmax()\n\n z_m[1][idx] = (word_id, new_topic, count)\n c_k_m[new_topic] += count\n\n return z_m[0], z_m[1], c_k_m, z_m[3]\n\n\ndef word_topics(z_m):\n wtt = []\n for word_topic in z_m[1]:\n word_id = word_topic[0]\n topic = word_topic[1]\n count = word_topic[2]\n wtt.append((str(word_id)+'_'+str(topic), count))\n return wtt\n\n\ndef compute_phi(c_k_x_n):\n phi = np.zeros((K, V))\n for k in range(0, K):\n for v in range(0, V):\n phi[k, v] = (c_k_x_n[k, v] + beta) / ((V * beta) + np.sum(c_k_x_n[k, :]))\n return phi\n\n\ndef compute_theta(c_k_m_x):\n theta = np.zeros((len(c_k_m_x), K))\n for m in range(0, len(c_k_m_x)):\n for k in range(0, K):\n theta[m, k] = (c_k_m_x[m, k] + alpha) / ((K * alpha) + np.sum(c_k_m_x[m, :]))\n return theta\n\n\ndef perplexity(docs, theta, phi):\n sum_nom = 0\n 
sum_docs_len = 0\n for d in docs:\n doc_id = d[0]\n sum_docs_len += np.sum(d[1].values)\n for word_id in d[1].indices:\n sum_nom -= np.log(np.inner(phi[:, word_id], theta[doc_id]))\n\n return math.exp(sum_nom / sum_docs_len)\n\n\ndef word_count(row):\n counts = []\n for idx in range(0, len(row[1].indices)):\n word_id = row[1].indices[idx]\n count = row[1].values[idx]\n counts.append((word_id, count))\n return counts\n\n\nstart_time = time.time()\nwith SparkSession.builder \\\n .master(\"local[*]\") \\\n .appName(\"LDA\") \\\n .getOrCreate() as spark:\n\n sc = spark.sparkContext\n sqlContext = SQLContext(sc)\n\n log4jLogger = sc._jvm.org.apache.log4j\n log = log4jLogger.LogManager.getLogger(__name__)\n log.warn(\"Spark starting...\")\n\n # 1. Read and process data\n log.warn(\"Processing dataset\")\n textFile = sc.textFile(dataset_dir+dataset_file, minPartitions=partitions_no)\n rdd = textFile.map(lambda line: line.split(',')[1], preservesPartitioning=True)\\\n .map(lambda doc: doc.split(' '), preservesPartitioning=True)\\\n .map(lambda word: [x for x in word if len(x) > 2], preservesPartitioning=True) \\\n .map(lambda word: [x.lower() for x in word], preservesPartitioning=True) \\\n .zipWithIndex().cache()\n\n df = sqlContext.createDataFrame(rdd, ['text', 'id'])\n M = df.count()\n log.warn(\"Number of docs = {0}\".format(M))\n\n # Remove stop words\n remover = StopWordsRemover(inputCol=\"text\", outputCol=\"filtered\")\n df_filtered = remover.transform(df).select('id', 'filtered')\n\n # Divide for testing and training datasets\n training_end_idx = int(training_ratio * M)\n training_set_raw = df_filtered.filter(df_filtered.id < training_end_idx).repartition(partitions_no)\n testing_set_raw = df_filtered.filter(df_filtered.id >= training_end_idx).repartition(partitions_no)\n\n # 2. Create vocabulary\n log.warn(\"Building vocabulary\")\n cv_model = CountVectorizer(inputCol=\"filtered\", outputCol=\"vectors\", minDF=minDF, vocabSize=vocab_size).fit(training_set_raw)\n V = len(cv_model.vocabulary)\n log.warn(\"Vocabulary size = {0}\".format(V))\n\n # 3. Transform documents to BOW representation:\n # each doc is represented as SparseVector: (vocabSize, {word_id:count, word_id:count,...}\n log.warn(\"Transform training dataset to bow representation\")\n training_set = cv_model.transform(training_set_raw).select('id', 'vectors').cache()\n log.warn('Training set: {0} documents'.format(training_set.count()))\n training_set_local = training_set.collect()\n\n # 4. Initialize model:\n # 4.1 each doc represented by (id, z_n array (topic to word assignment) and c_k_m (topics distribution for doc)\n # 4.2 randomly assign topic to each word in document, increment c_k_m accordingly\n z_m_n = training_set.rdd.map(init, preservesPartitioning=True).cache()\n\n z_m_n_matrix = z_m_n.flatMap(word_topics).reduceByKey(lambda a, b: a + b).collect()\n c_k_global = update_c_k(z_m_n_matrix)\n c_k_n_global = update_c_k_n(z_m_n_matrix)\n\n c_k_m_x = get_c_k_m_x(z_m_n.map(lambda x: (x[0], x[2])).sortByKey(ascending=True).collect())\n theta = compute_theta(c_k_m_x)\n phi = compute_phi(c_k_n_global)\n perplex = perplexity(training_set_local, theta, phi)\n results.append(('init train', 0, perplex))\n log.warn('Initial perplexity = {0}'.format(perplex))\n\n # 5. 
LDA with collapsed gibbs sampling\n log.warn(\"Training...\")\n for i in range(0, iterations, param_update):\n\n c_k = sc.broadcast(c_k_global)\n c_k_n = sc.broadcast(c_k_n_global)\n\n new_z = z_m_n.map(gibbs_sampling, preservesPartitioning=True).cache()\n\n z_m_n_matrix = new_z.flatMap(word_topics).reduceByKey(lambda a, b: a+b).collect()\n c_k_global = update_c_k(z_m_n_matrix)\n c_k_n_global = update_c_k_n(z_m_n_matrix)\n\n if eval_every is not None and i > 0 and ((i >= 100 and i % eval_every == 0) or (i < 100 and i % 5 == 0)):\n c_k_m_x = get_c_k_m_x(new_z.map(lambda x: (x[0], x[2])).sortByKey(ascending=True).collect())\n theta = compute_theta(c_k_m_x)\n phi = compute_phi(c_k_n_global)\n perplex = perplexity(training_set_local, theta, phi)\n results.append(('train', i, perplex))\n log.warn('Iteration {0} - perplexity = {1}'.format(i, perplex))\n\n z_m_n = None\n z_m_n = new_z\n new_z = None\n\n # 6. Calculate phi and theta\n c_k_m_x = get_c_k_m_x(z_m_n.map(lambda x: (x[0], x[2]), preservesPartitioning=True).sortByKey(ascending=True).collect())\n theta = compute_theta(c_k_m_x)\n phi = compute_phi(c_k_n_global)\n perplex = perplexity(training_set_local, theta, phi)\n results.append(('train', iterations, perplex))\n log.warn('Final train set perplexity = {0}'.format(perplex))\n\n # 7. Evaluate perplexity on the testing set\n log.warn(\"Transform test dataset to bow representation\")\n testing_set = cv_model.transform(testing_set_raw).select('id', 'vectors').rdd.map(lambda x: (x[0]-450, x[1])).repartition(partitions_no)\n testing_set_local = testing_set.collect()\n log.warn('Test set: {0} documents'.format(len(testing_set_local)))\n\n z_m_n_test = testing_set.map(init, preservesPartitioning=True).cache()\n c_k_m_x = get_c_k_m_x(z_m_n_test .map(lambda x: (x[0], x[2])).sortByKey(ascending=True).collect())\n theta_test = compute_theta(c_k_m_x)\n perplex = perplexity(testing_set_local, theta_test, phi)\n results.append(('test', 0, perplex))\n log.warn('Initial test set perplexity = {1}'.format(i, perplex))\n\n new_z_test = z_m_n_test.map(process_testset, preservesPartitioning=True).cache()\n\n c_k_m_x = get_c_k_m_x(new_z_test.map(lambda x: (x[0], x[2]), preservesPartitioning=True)\n .sortByKey(ascending=True).collect())\n theta_test = compute_theta(c_k_m_x)\n perplex = perplexity(testing_set_local, theta_test, phi)\n results.append(('test', iterations_test, perplex))\n log.warn('Testing set perplexity = {0}'.format(perplex))\n\n # 8. Print words in topic distribution\n for topic in range(0, K):\n log.warn(\"Topic {0}\".format(topic))\n word_ids = np.argpartition(phi[topic], -4)[-4:]\n for word_id in word_ids:\n log.warn('{0}: {1}'.format(cv_model.vocabulary[word_id], phi[topic, word_id]))\n\n end_time = time.time()\n log.warn(\"Execution time = {0} s\".format(end_time-start_time))\n results.append(('time', end_time - start_time, 0))\n\n # 9. Save perplexity and time to file:\n filename = 'result_dataset={0}_k={1}_V={2}_update={3}_iter={4}.{5}'\n if len(results) > 0:\n with open(results_dir+filename.format(dataset_file,K,V,param_update,iterations,'csv'), 'w') as f:\n for result in results:\n f.write('{0},{1},{2}\\n'.format(result[0], result[1], result[2]))\n\n\n # 9. 
Create and save visualization to file\n if visualization:\n docs_len = []\n for doc in training_set_local:\n docs_len.append(np.sum(doc[1].values))\n\n word_frequency = training_set.rdd.flatMap(word_count).reduceByKey(lambda a, b: a + b).sortByKey().values().collect()\n\n plot = pyLDAvis.prepare(phi, theta, docs_len, cv_model.vocabulary, word_frequency)\n pyLDAvis.save_html(plot, visualization_output_dir+\n filename.format(dataset_file,K,V,param_update,iterations, '.html'))\n\n","repo_name":"aciborowska/SparkLDA","sub_path":"SparkLDA/LDA.py","file_name":"LDA.py","file_ext":"py","file_size_in_byte":11557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15000927062","text":"import tensorflow as tf\r\nimport tensorflow_addons as tfa\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport random\r\nfrom classification_models.tfkeras import Classifiers\r\n\r\n\r\n# Seed everything\r\ndef seed_everything(seed=42):\r\n random.seed(seed)\r\n np.random.seed(seed)\r\n tf.random.set_seed(seed)\r\n\r\n\r\n# Preprocess TF datasets for training or validation\r\ndef preprocess(image, label, seed, inputs, training=False):\r\n\r\n # Seed everything\r\n random.seed(seed)\r\n np.random.seed(seed)\r\n tf.random.set_seed(seed)\r\n\r\n # Resize Images\r\n if inputs.model == 'ResNet34':\r\n height, width = 224, 224\r\n if inputs.model == 'ResNet50':\r\n height, width = 224, 224\r\n elif inputs.model == 'ResNet18':\r\n height, width = 224, 224\r\n elif inputs.model == 'EfficientNetB0':\r\n height, width = 224, 224\r\n elif inputs.model == 'EfficientNetB3':\r\n height, width = 300, 300\r\n elif inputs.model == 'MobileNetV2':\r\n height, width = 224, 224\r\n elif inputs.model == 'MobileNetV3Large':\r\n height, width = 224, 224\r\n elif inputs.model == 'DenseNet121':\r\n height, width = 224, 224\r\n elif inputs.model == 'Xception':\r\n height, width = 299, 299\r\n elif inputs.model == 'VGG16':\r\n height, width = 224, 224\r\n elif inputs.model == 'VGG19':\r\n height, width = 224, 224\r\n\r\n image = tf.expand_dims(image, axis=-1)\r\n image = tf.image.resize(image, [height, width])\r\n image = (image - tf.reduce_min(image)) / (\r\n tf.reduce_max(image) - tf.reduce_min(image)) * 255.0 # rescale to [0, 255]\r\n image = tf.image.grayscale_to_rgb(image)\r\n\r\n @tf.function\r\n def _specaugment(image, ERASE_TIME, ERASE_MEL):\r\n image = tf.expand_dims(image, axis=0)\r\n xoff = tf.random.uniform([2], minval=ERASE_TIME // 2, maxval=width - ERASE_TIME // 2, dtype=tf.int32)\r\n xsize = tf.random.uniform([2], minval=ERASE_TIME // 2, maxval=ERASE_TIME, dtype=tf.int32)\r\n yoff = tf.random.uniform([2], minval=ERASE_MEL // 2, maxval=height - ERASE_MEL // 2, dtype=tf.int32)\r\n ysize = tf.random.uniform([2], minval=ERASE_MEL // 2, maxval=ERASE_MEL, dtype=tf.int32)\r\n image = tfa.image.cutout(image, [height, xsize[0]], offset=[height // 2, xoff[0]])\r\n image = tfa.image.cutout(image, [height, xsize[1]], offset=[height // 2, xoff[1]])\r\n image = tfa.image.cutout(image, [ysize[0], width], offset=[yoff[0], width // 2])\r\n image = tfa.image.cutout(image, [ysize[1], width], offset=[yoff[1], width // 2])\r\n image = tf.squeeze(image, axis=0)\r\n return image\r\n\r\n if training:\r\n print('Training: Preprocessing Images')\r\n # gaussian\r\n if inputs.training['Gaussian'] is not None:\r\n gau = tf.keras.layers.GaussianNoise(inputs.training['Gaussian'])\r\n image = tf.cond(tf.random.uniform([]) < 0.5, lambda: gau(image, training=True), lambda: image)\r\n # 
brightness\r\n if inputs.training['Brightness'] is not None:\r\n image = tf.image.random_brightness(image, inputs.training['Brightness'])\r\n # specaugment\r\n if inputs.training['SpecAug'][0] is not None:\r\n erase_time = inputs.training['SpecAug'][0]\r\n erase_mel = inputs.training['SpecAug'][1]\r\n image = tf.cond(tf.random.uniform([]) < 0.5, lambda: _specaugment(image, erase_time, erase_mel),\r\n lambda: image)\r\n\r\n # Select preprocess input function\r\n if inputs.model == 'ResNet34':\r\n _, preprocess_input = Classifiers.get('resnet34')\r\n elif inputs.model == 'ResNet18':\r\n _, preprocess_input = Classifiers.get('resnet18')\r\n elif inputs.model == 'ResNet50':\r\n image = tf.keras.applications.resnet50.preprocess_input(image)\r\n return image, label\r\n elif inputs.model == 'EfficientNetB0' or inputs.model == 'EfficientNetB3':\r\n image = tf.keras.applications.efficientnet.preprocess_input(image)\r\n return image, label\r\n elif inputs.model == 'MobileNetV2':\r\n image = tf.keras.applications.mobilenet_v2.preprocess_input(image)\r\n return image, label\r\n elif inputs.model == 'MobileNetV3Large':\r\n image = tf.keras.applications.mobilenet_v3.preprocess_input(image)\r\n return image, label\r\n elif inputs.model == 'Xception':\r\n image = tf.keras.applications.xception.preprocess_input(image)\r\n return image, label\r\n elif inputs.model == 'DenseNet121':\r\n image = tf.keras.applications.densenet.preprocess_input(image)\r\n return image, label\r\n elif inputs.model == 'VGG16':\r\n image = tf.keras.applications.vgg16.preprocess_input(image)\r\n return image, label\r\n elif inputs.model == 'VGG19':\r\n image = tf.keras.applications.vgg19.preprocess_input(image)\r\n return image, label\r\n else:\r\n print('Model Type Not Found')\r\n image = preprocess_input(image)\r\n return image, label\r\n\r\n\r\ndef preprocess_mixup(ds):\r\n @tf.function\r\n def _mixup(inp, targ):\r\n indice = tf.range(len(inp))\r\n indice = tf.random.shuffle(indice)\r\n sinp = tf.gather(inp, indice, axis=0)\r\n starg = tf.gather(targ, indice, axis=0)\r\n alpha = 0.2\r\n t = tf.compat.v1.distributions.Beta(alpha, alpha).sample([len(inp)])\r\n tx = tf.reshape(t, [-1, 1, 1, 1])\r\n ty = tf.reshape(t, [-1, 1])\r\n x = inp * tx + sinp * (1 - tx)\r\n y = targ * ty + starg * (1 - ty)\r\n return x, y\r\n\r\n\r\n# Plot preprocessed images to check preprocessing\r\ndef check_image(dataset, xtrain):\r\n plt.ion()\r\n plt.figure(1)\r\n\r\n count = 0\r\n for ele in dataset:\r\n species_id = ele[1].numpy().argmax()\r\n image_ds = ele[0].numpy()[:, :, 2]\r\n\r\n image_train = xtrain[count]\r\n max_image_ds = str(np.round(image_ds.max(), 1))[0:5]\r\n min_image_ds = str(np.round(image_ds.min(), 1))[0:5]\r\n\r\n max_image = str(np.round(image_train.max(), 1))[0:5]\r\n min_image = str(np.round(image_train.min(), 1))[0:5]\r\n\r\n plt.clf()\r\n ax0 = plt.subplot(121)\r\n ax1 = plt.subplot(122)\r\n\r\n pos0 = ax0.imshow(image_train, vmin=0.0, vmax=255.0)\r\n pos1 = ax1.imshow(image_ds, vmin=-105.0, vmax=150.0)\r\n\r\n ax0.set_title(f'Numpy: {species_id} - Min {min_image} and Max {max_image}')\r\n ax1.set_title(f'Dataset: {species_id} - Min {min_image_ds} and Max {max_image_ds}')\r\n plt.colorbar(pos0, ax=ax0)\r\n plt.colorbar(pos1, ax=ax1)\r\n plt.draw()\r\n plt.waitforbuttonpress()\r\n count += 1\r\n\r\n\r\n# Plot preprocessed images to check preprocessing\r\ndef check_test_image(dataset, xtrain):\r\n plt.ion()\r\n plt.figure(1)\r\n\r\n count = 0\r\n for ele in dataset:\r\n images_ds = ele[0].numpy()[:, :, :, 2]\r\n images_train = 
xtrain[count]\r\n samples_per_audio_file = images_train.shape[0]\r\n for i in range(samples_per_audio_file):\r\n image_ds = images_ds[i]\r\n image_train = images_train[i]\r\n max_image_ds = str(np.round(image_ds.max(), 1))[0:5]\r\n min_image_ds = str(np.round(image_ds.min(), 1))[0:5]\r\n\r\n max_image = str(np.round(image_train.max(), 1))[0:5]\r\n min_image = str(np.round(image_train.min(), 1))[0:5]\r\n\r\n plt.clf()\r\n ax0 = plt.subplot(121)\r\n ax1 = plt.subplot(122)\r\n\r\n pos0 = ax0.imshow(image_train, vmin=0.0, vmax=255.0)\r\n pos1 = ax1.imshow(image_ds, vmin=-105.0, vmax=150.0)\r\n\r\n ax0.set_title(f'Numpy Sample {i}: Min {min_image} and Max {max_image}')\r\n ax1.set_title(f'Dataset: Sample {i}: Min {min_image_ds} and Max {max_image_ds}')\r\n plt.colorbar(pos0, ax=ax0)\r\n plt.colorbar(pos1, ax=ax1)\r\n plt.draw()\r\n plt.waitforbuttonpress()\r\n count += 1\r\n\r\n\r\n# Plot preprocessed images to check preprocessing\r\ndef check_test_image2(dataset, xtrain, num_windows):\r\n plt.ion()\r\n plt.figure(1)\r\n\r\n count = 0\r\n for ele in dataset:\r\n image_ds = ele[0].numpy()[:, :, 0]\r\n image_train = xtrain[count]\r\n\r\n max_image_ds = str(np.round(image_ds.max(), 1))[0:5]\r\n min_image_ds = str(np.round(image_ds.min(), 1))[0:5]\r\n\r\n max_image = str(np.round(image_train.max(), 1))[0:5]\r\n min_image = str(np.round(image_train.min(), 1))[0:5]\r\n\r\n plt.clf()\r\n ax0 = plt.subplot(121)\r\n ax1 = plt.subplot(122)\r\n\r\n pos0 = ax0.imshow(image_train, vmin=0.0, vmax=255.0)\r\n pos1 = ax1.imshow(image_ds, vmin=-105.0, vmax=150.0)\r\n\r\n ax0.set_title(f'Numpy Sample {count}: Min {min_image} and Max {max_image}')\r\n ax1.set_title(f'Dataset: Sample {count}: Min {min_image_ds} and Max {max_image_ds}')\r\n plt.colorbar(pos0, ax=ax0)\r\n plt.colorbar(pos1, ax=ax1)\r\n plt.draw()\r\n plt.waitforbuttonpress()\r\n count += 1","repo_name":"mddunlap924/Rainforest---Audio-Classificaiton","sub_path":"preprocess_dataset_sed.py","file_name":"preprocess_dataset_sed.py","file_ext":"py","file_size_in_byte":8925,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"3364578776","text":"#!/usr/bin/python3\n\nimport tensorflow as tf;\nfrom models import YOLOv3;\n\ndef main():\n\n yolov3 = YOLOv3((416, 416, 3), 80);\n yolov3.load_weights('./checkpoints/ckpt/variables/variables');\n yolov3.save('yolov3.h5');\n yolov3.save_weights('yolov3_weights.h5');\n\nif __name__ == \"__main__\":\n\n assert tf.executing_eagerly();\n main();\n\n","repo_name":"breadbread1984/YOLOv3-tf2.0","sub_path":"save_model_keras.py","file_name":"save_model_keras.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"81"} +{"seq_id":"14075381365","text":"\"\"\"Tests the ZeverSolar API wrapper.\"\"\"\nfrom unittest.mock import patch\n\nimport httpx\n\nfrom custom_components.zeversolar_local.zever_local import ZeverSolarApiClient\n\n_registry_id = \"EAB241277A36\"\n_registry_key = \"ZYXTBGERTXJLTSVS\"\n_hardware_version = \"M11\"\n_software_version = \"18625-797R+17829-719R\"\n_time = \"16:22\"\n_date = \"20/02/2022\"\n_serial_number = \"ZS150045138C0104\"\n_content = f\"1\\n1\\n{_registry_id}\\n{_registry_key}\\n{_hardware_version}\\n{_software_version}\\n{_time} {_date}\\n1\\n1\\n{_serial_number}\\n1234\\n8.9\\nOK\\nError\"\n_byte_content = _content.encode()\n\n\nasync def test_ZeverSolarApiClient_class(hass):\n \"\"\"Simple test for construction and initialization.\"\"\"\n host = 
\"TEST_HOST\"\n\n result_api = ZeverSolarApiClient(host)\n assert type(result_api) is ZeverSolarApiClient\n\n\nasync def test_ZeverSolarApiClient_async_get_id_ok(hass):\n \"\"\"Simple test for construction and initialization.\"\"\"\n host = \"TEST_HOST\"\n\n result_api = ZeverSolarApiClient(host)\n\n mock_response = httpx.Response(\n 200, request=httpx.Request(\"Get\", f\"https://{host}\"), content=_byte_content\n )\n\n with patch(\"zever_local.inverter.httpx.AsyncClient.get\") as mock_device_info:\n mock_device_info.return_value = mock_response\n\n expected_id = \"EA-B2-41-27-7A-36\"\n result_id = await result_api.async_get_id()\n\n assert expected_id == result_id\n\n\nasync def test_ZeverSolarApiClient_async_get_data_ok(hass):\n \"\"\"Simple test for construction and initialization.\"\"\"\n host = \"TEST_HOST\"\n\n result_api = ZeverSolarApiClient(host)\n\n mock_response = httpx.Response(\n 200, request=httpx.Request(\"Get\", f\"https://{host}\"), content=_byte_content\n )\n\n with patch(\"zever_local.inverter.httpx.AsyncClient.get\") as mock_device_info:\n mock_device_info.return_value = mock_response\n\n inverter_data = await result_api.async_get_data()\n\n energy_today_KWh = inverter_data.energy_today_KWh\n assert energy_today_KWh == 8.09\n","repo_name":"NECH2004/zeversolar_local","sub_path":"tests/test_zever_local.py","file_name":"test_zever_local.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17625164630","text":"from flask import render_template, request, redirect, url_for, flash, jsonify\nfrom app import app, db\nfrom app.models import Expense, Category\nfrom sqlalchemy.sql import func\nfrom datetime import datetime, date\nfrom sqlalchemy.orm import joinedload\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nimport logging\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"login.html\")\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if request.method == \"POST\":\n # Add logic to handle login (authentication)\n pass\n return render_template(\"login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n # Your logout logic here\n # For example, if using Flask-Login:\n # logout_user()\n return redirect(url_for(\"index\"))\n\n\n## CATEGORIES ##\n\n\n@app.route(\"/categories\")\ndef categories():\n categories = Category.query.order_by(Category.priority).all()\n return render_template(\"categories.html\", categories=categories)\n\n\n@app.route(\"/get_categories\", methods=[\"GET\"])\ndef get_categories():\n categories = Category.query.order_by(Category.priority).all()\n return jsonify(categories=[category.serialize for category in categories])\n\n\n@app.route(\"/edit_category\", methods=[\"POST\"])\ndef edit_category():\n data = request.get_json()\n category_id = data.get(\"id\")\n field = data.get(\"field\")\n new_value = data.get(field)\n\n category = Category.query.get_or_404(category_id)\n if field == \"name\":\n category.name = new_value\n elif field == \"budget\":\n category.budget = new_value\n db.session.commit()\n\n return jsonify({\"success\": True})\n\n\n@app.route(\"/add_category\", methods=[\"POST\"])\ndef add_category():\n name = request.form.get(\"name\")\n budget = request.form.get(\"budget\")\n # Assign the next priority\n max_priority = db.session.query(db.func.max(Category.priority)).scalar() or 0\n if name and budget:\n currency = request.form.get(\"currency\")\n category = 
Category(\n name=name, budget=budget, currency=currency, priority=max_priority + 1\n )\n db.session.add(category)\n db.session.commit()\n flash(\"Category added successfully!\", \"success\")\n return redirect(url_for(\"categories\"))\n else:\n return \"Error\", 400\n\n\n@app.route(\"/reorder_categories\", methods=[\"POST\"])\ndef reorder_categories():\n order = request.form.getlist(\"order[]\")\n if order:\n for index, id in enumerate(order, 1):\n category = Category.query.get(id)\n category.priority = index\n db.session.commit()\n flash(\"Categories reordered successfully!\", \"success\")\n return redirect(url_for(\"categories\"))\n else:\n return \"Error\", 400\n\n\n@app.route(\"/delete_category\", methods=[\"POST\"])\ndef delete_category():\n data = request.get_json()\n category_id = data.get(\"id\")\n\n category = Category.query.get_or_404(category_id)\n db.session.delete(category)\n db.session.commit()\n\n return jsonify({\"success\": True})\n\n\n@app.route(\"/delete_expense/\", methods=[\"POST\"])\ndef delete_expense(id):\n expense = Expense.query.get_or_404(id)\n db.session.delete(expense)\n db.session.commit()\n flash(\"Expense deleted successfully!\", \"success\")\n # Redirect to the page where expenses are listed after deletion.\n return redirect(url_for(\"expenses\"))\n\n\n@app.route(\"/expense_data\", methods=[\"GET\"])\ndef expense_data():\n data = (\n db.session.query(Expense.category_id, func.sum(Expense.amount))\n .join(Category, Expense.category_id == Category.id)\n .group_by(Expense.category_id)\n .all()\n )\n categories = [\n Category.query.get(item[0]).name for item in data\n ] # Assure this line gets the category name properly\n amounts = [item[1] for item in data]\n\n return jsonify({\"categories\": categories, \"amounts\": amounts})\n\n\n@app.route(\"/add_expense\", methods=[\"POST\"])\ndef add_expense():\n category_id = request.form[\"category\"]\n amount = request.form[\"amount\"]\n name = request.form[\"name\"]\n currency = request.form[\"currency\"]\n recurring = request.form.get(\n \"recurring\", \"No\"\n ) # Set default to \"No\" if not provided\n\n # Find the category object based on the ID provided\n category = Category.query.get(category_id)\n if not category:\n flash(f\"Category with ID '{category_id}' not found!\", \"danger\")\n return redirect(\n url_for(\"expenses\")\n ) # Redirect to the expenses page if category not found\n\n date_str = request.form.get(\"date\")\n date = datetime.strptime(date_str, \"%Y-%m-%d\") if date_str else datetime.utcnow()\n\n # Create a new Expense object using the category_id\n expense = Expense(\n name=name,\n currency=currency,\n recurring=recurring,\n category_id=category.id,\n amount=amount,\n date=date,\n )\n db.session.add(expense)\n db.session.commit()\n\n flash(\"Expense added successfully!\", \"success\")\n # Redirect to the page where expenses are listed after adding.\n return redirect(url_for(\"expenses\"))\n\n\n@app.route(\"/expenses\")\ndef expenses():\n categories = Category.query.all() # Assuming you have a Category model.\n expenses = Expense.query.all() # If you want to list all expenses on the same page.\n return render_template(\"expenses.html\", categories=categories, expenses=expenses)\n\n\n# This is an example in Python using Flask*----------------------------------------------------------------------------------------\n\n\ndef get_spending_data(start_date, end_date, category_id=None):\n query = db.session.query(\n Expense.date, func.sum(Expense.amount).label(\"amount\")\n ).filter(Expense.date >= 
start_date, Expense.date <= end_date)\n if category_id is not None:\n query = query.filter(Expense.category_id == category_id)\n\n return query.group_by(Expense.date).all()\n\n\ndef calculate_forecast_spending(start_date, end_date, category_id=None):\n # Start of the current month\n current_month_start = date.today().replace(day=1)\n\n # Get spending data for the current month up to yesterday\n past_spending_data = get_spending_data(\n current_month_start, date.today() - timedelta(days=1), category_id\n )\n\n # Calculate total spending and average daily spending\n total_spending = sum([record.amount for record in past_spending_data])\n num_days_past = (date.today() - current_month_start).days\n average_daily_spending = total_spending / num_days_past if num_days_past > 0 else 0\n\n # Create forecast data for the entire current month\n total_days_in_month = (\n date(current_month_start.year, current_month_start.month + 1, 1)\n - timedelta(days=1)\n ).day\n forecast_spending = 0\n forecast_values = []\n\n for day in range(1, total_days_in_month + 1):\n forecast_spending += average_daily_spending\n forecast_values.append(forecast_spending)\n\n # Generate labels for the entire current month\n labels = [\n (current_month_start + timedelta(days=i - 1)).strftime(\"%Y-%m-%d\")\n for i in range(1, total_days_in_month + 1)\n ]\n\n return {\"labels\": labels, \"values\": forecast_values}\n\n\n@app.route(\"/get-forecast-spending\", methods=[\"POST\"])\ndef get_forecast_spending():\n try:\n data = request.get_json()\n category_id = data.get(\"categoryId\")\n\n # Set start and end dates to cover the entire current month\n start_date = date.today().replace(day=1)\n end_date = date(start_date.year, start_date.month + 1, 1) - timedelta(days=1)\n\n forecast_data = calculate_forecast_spending(start_date, end_date, category_id)\n\n return jsonify(forecast_data)\n except Exception as e:\n return jsonify({\"error\": str(e)}), 500\n\n\n@app.route(\"/get-category-data/\")\ndef get_category_data(category_id):\n try:\n category = Category.query.get(category_id)\n if not category:\n return jsonify({\"error\": f\"Category with ID {category_id} not found\"}), 404\n total_expenses = (\n db.session.query(func.sum(Expense.amount))\n .filter(Expense.category_id == category_id)\n .scalar()\n or 0\n )\n remainder = category.budget - total_expenses\n data = {\n \"budget\": category.budget,\n \"expenses\": total_expenses,\n \"remainder\": remainder,\n }\n return jsonify(data)\n except Exception as e:\n return jsonify({\"error\": str(e)}), 500\n\n\n@app.route(\"/get-all-categories-data\")\ndef get_all_categories_data():\n try:\n total_budget = db.session.query(func.sum(Category.budget)).scalar() or 0\n total_expenses = db.session.query(func.sum(Expense.amount)).scalar() or 0\n total_remainder = total_budget - total_expenses\n data = {\n \"totalBudget\": total_budget,\n \"totalExpenses\": total_expenses,\n \"totalRemainder\": total_remainder,\n }\n return jsonify(data)\n except Exception as e:\n return jsonify({\"error\": str(e)}), 500\n\n\n@app.route(\"/get-in-progress-spending\", methods=[\"POST\"])\ndef get_in_progress_spending():\n try:\n data = request.get_json()\n start_date = datetime.strptime(data[\"startDate\"], \"%Y-%m-%d\")\n end_date = datetime.strptime(data[\"endDate\"], \"%Y-%m-%d\")\n category_id = data.get(\"categoryId\")\n spending_data = get_spending_data(start_date, end_date, category_id)\n labels = [record.date.strftime(\"%Y-%m-%d\") for record in spending_data]\n values = [float(record.amount) for record in 
spending_data]\n return jsonify({\"labels\": labels, \"values\": values})\n except ValueError:\n return jsonify({\"error\": \"Invalid date format. Please use YYYY-MM-DD.\"}), 400\n except Exception as e:\n return jsonify({\"error\": str(e)}), 500\n\n\n@app.route(\"/overview\")\ndef overview():\n try:\n categories = Category.query.order_by(Category.name).all()\n all_categories_data = get_all_categories_data().get_json()\n\n start_date = datetime.now().replace(day=1)\n end_date = start_date + relativedelta(months=1, days=-1)\n\n in_progress_data = get_spending_data(start_date, end_date)\n # Ensure calculate_forecast_spending is implemented or handle it appropriately\n forecast_data = calculate_forecast_spending(start_date, end_date)\n context = {\n \"categories\": categories,\n \"totalBudget\": all_categories_data[\"totalBudget\"],\n \"totalExpenses\": all_categories_data[\"totalExpenses\"],\n \"totalRemainder\": all_categories_data[\"totalRemainder\"],\n \"inProgressLabels\": [\n record.date.strftime(\"%Y-%m-%d\") for record in in_progress_data\n ],\n \"inProgressValues\": [float(record.amount) for record in in_progress_data],\n # Assuming calculate_forecast_spending returns a dictionary with labels and values\n \"forecastLabels\": forecast_data[\"labels\"] if forecast_data else [],\n \"forecastValues\": forecast_data[\"values\"] if forecast_data else [],\n }\n\n return render_template(\"overview.html\", **context)\n except Exception as e:\n return jsonify({\"error\": str(e)}), 500\n","repo_name":"JoachimBaumann/InteractiveSystemsEngineering-SDUGroup12","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":11271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10612568751","text":"from kivy.core.window import Window\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.checkbox import CheckBox\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.dropdown import DropDown\nfrom kivy.uix.label import Label\n\nclass UserInterface:\n def __init__(self, client):\n # TODO handle connection errors properly\n self.client = client\n self.layout = GridLayout(cols = 2, pos_hint={'x': 0, 'y': 0}, size_hint=(1, 0.1))\n self.client.layout.add_widget(self.layout, index = 1000)\n \n self.client.bind('connected', self.connected)\n self.client.bind('loaded', self.loaded)\n self.client.bind('registered', self.registered)\n \n if self.client.config.get('connection', 'autoconnect') == 'yes':\n self.auto = True\n self.client.connect(self.client.config.get('connection', 'server'))\n else:\n self.auto = False\n \n self.server_input = TextInput(text = self.client.config.get('connection', 'server'))\n self.server_button = Button(text = 'Connect', size_hint = (0.25, 1))\n self.server_button.bind(on_press = self.do_connect)\n \n self.layout.add_widget(self.server_input)\n self.layout.add_widget(self.server_button)\n \n def do_connect(self, button):\n self.client.connect(self.server_input.text)\n \n self.layout.remove_widget(self.server_input)\n self.layout.remove_widget(self.server_button)\n del self.server_input, self.server_button\n \n self.connecting_label = Label(text = 'connecting...')\n self.layout.add_widget(self.connecting_label)\n \n def connected(self, event):\n if not self.auto:\n self.client.config.set('connection', 'server', self.client.server)\n self.connecting_label.text = 'loading...'\n \n def loaded(self, event):\n if self.auto:\n 
self.client.register(self.client.config.get('connection', '_id'))\n return\n \n self.layout.remove_widget(self.connecting_label)\n del self.connecting_label\n \n self.dropdown = DropDown()\n \n for stage in sorted(self.client.meteor.find('stages'), key=lambda x: x['title']):\n self.dropdown.add_widget(Label(text = stage['title'], size_hint_y = None, height = 40))\n \n seen = []\n for minion in sorted(self.client.meteor.find('minions', \n selector = {'stage': stage['_id'], 'type': 'media'}), key=lambda x: x['title']):\n # workaround for python-meteor bug\n if not minion['stage'] == stage['_id']: continue\n \n if minion['_id'] in seen: continue\n else: seen.append(minion['_id'])\n\n button = Button(text = minion['title'], size_hint_y = None, height = 30)\n button.minion_id = minion['_id']\n button.bind(on_press = self.do_register)\n self.dropdown.add_widget(button)\n \n self.dropdown_button = Button(text = 'Select Minion')\n self.dropdown_button.bind(on_release = self.dropdown.open)\n self.layout.add_widget(self.dropdown_button)\n \n self.auto_checkbox = CheckBox()\n self.auto_label = Label(text = 'Connect automatically on start')\n self.layout.add_widget(self.auto_checkbox)\n self.layout.add_widget(self.auto_label) \n \n def do_register(self, button):\n self.client.config.set('connection', '_id', button.minion_id)\n self.client.config.set('connection', 'autoconnect', 'yes' if self.auto_checkbox.active else 'no')\n self.client.config.write()\n self.client.register(button.minion_id)\n \n self.dropdown.dismiss()\n self.layout.remove_widget(self.dropdown_button)\n self.layout.remove_widget(self.auto_checkbox)\n self.layout.remove_widget(self.auto_label)\n del self.dropdown_button, self.dropdown, self.auto_checkbox, self.auto_label\n \n self.registering_label = Label(text = 'registering...')\n self.layout.add_widget(self.registering_label)\n \n def registered(self, event):\n if not self.auto:\n self.layout.remove_widget(self.registering_label)\n del self.registering_label\n","repo_name":"cedarproject/displayminion","sub_path":"displayminion/UserInterface.py","file_name":"UserInterface.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"19014127218","text":"class Solution:\n def thirdMax(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # track the three largest distinct values; -inf marks an empty slot\n one = two = three = float('-inf')\n for num in nums:\n if num > one:\n one, two, three = num, one, two\n elif num < one and num > two:\n two, three = num, two\n elif num < two and num > three:\n three = num\n \n if three != float('-inf'):\n return three\n
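 # fewer than three distinct numbers exist, so return the maximum instead\n return one\n","repo_name":"ChanchalKumarMaji/LeetCode","sub_path":"414. 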
Third Maximum Number/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"2825895686","text":"import os\nimport sys\nimport requests\nclient_id = os.getenv('NAVER_ID')\nclient_secret = os.getenv('NAVER_SECRET')\n# url = \"https://openapi.naver.com/v1/vision/face\" // 얼굴감지\nurl = \"https://openapi.naver.com/v1/vision/celebrity\" # 유명인 얼굴인식\nfiles = {'image': open('jdragon.jpg', 'rb')}\nheaders = {'X-Naver-Client-Id': client_id, 'X-Naver-Client-Secret': client_secret }\nresponse = requests.post(url, files=files, headers=headers) # POST the image as multipart/form-data\nrescode = response.status_code\n\ndoc = response.json()\n\nprint(doc)\nresult = doc['faces'][0]['celebrity']['value']\nprint(result)\n\n\n# if(rescode==200):\n# print (response.text)\n# else:\n# print(\"Error Code:\" + str(rescode))","repo_name":"yomandawg/python-toys","sub_path":"flask_app/chatbot/day5/clova.py","file_name":"clova.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69794423306","text":"# Problem Statement: https://leetcode.com/problems/delete-and-earn/\n\nfrom typing import List\n\nclass Solution:\n def deleteAndEarn(self, nums: List[int]) -> int:\n \n # score[v] collects the total points gained by deleting every copy of value v\n score = [0] * (max(nums)+1)\n \n # score[i] saves the points from all copies of i in the input array\n for number in nums:\n score[number] += number\n \n \n # Reduce to the House Robber Problem\n # Leetcode #198: https://leetcode.com/problems/house-robber/\n size = len(score)\n \n if size <= 2:\n return max(score)\n \n max_points = [0 for _ in range(size)]\n max_points[0] = score[0]\n max_points[1] = max(score[0], score[1])\n \n
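 # house-robber recurrence: take value i (score[i] plus the best up to i-2) or skip it\n for i in range(2, size):\n \n take_integer_i = max_points[i-2] + score[i]\n not_to_take_integer_i = max_points[i-1] + 0\n \n max_points[i] = max(take_integer_i, not_to_take_integer_i)\n \n return max_points[-1]\n\n'''\nGiven nums = [2,2,3,3,3,4]\n\nreduce it to House Robber problem\n\nscore = [0, 0, 2+2, 3+3+3, 4] = [0, 0, 4, 9, 4]\n\nMax points of Delete and Earn with [2,2,3,3,3,4]\n= Max value of Leetcode 198: House Robber with [0, 0, 4, 9, 4]\n= 9\n'''","repo_name":"yashitanamdeo/leetcode","sub_path":"Medium/740. 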
Delete and Earn.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"}
+{"seq_id":"25718586375","text":"import torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\ndef _concat(xs):\n    return torch.cat([x.view(-1) for x in xs])\n\n\nclass ArchitectNASP(object):\n\n    def __init__(self, model, args):\n        self.network_momentum = args.momentum\n        self.network_weight_decay = args.weight_decay\n        self.model = model\n        self.optimizer = torch.optim.Adam(self.model.arch_parameters(),\n                                          lr=args.arch_learning_rate, betas=(0.5, 0.999), \n                                          weight_decay=args.arch_weight_decay)\n\n    def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer):\n        self.optimizer.zero_grad()\n        self._backward_step(input_valid, target_valid)\n        self.optimizer.step()\n\n    def _backward_step(self, input_valid, target_valid):\n        self.model.binarization()\n        loss = self.model._loss(input_valid, target_valid)\n        loss.backward()\n        self.model.restore()\n","repo_name":"Sunshine-Ye/Beta-DARTS","sub_path":"optimizers/nasp/architect.py","file_name":"architect.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"81"}
+{"seq_id":"32063363024","text":"# For a natural number n, create an \n# index-value dictionary consisting of the elements \n# of the sequence 3n + 1.\n# For n = 6: {1: 4, 2: 7, 3: 10, 4: 13, 5: 16, 6: 19}\n\ndata = '1 2 3 4 5 6'.split()\nresult = map(int, data)\nmy_list = list(map(lambda x: (3*x+1), result))\nprint(my_list)\nnumbers = [i for i in range(1, 7)]\n\nd = dict(zip(numbers, my_list))\nprint(d)\n\n# Question: how can the separators in the output be made \":\" ?","repo_name":"reistorm/Python-dz-6","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"22031298544","text":"\nfrom pathlib import Path\n\nimport pytest\n\nfrom pyfcopy import copy_file\nfrom pyfcopy_test.file_progress_listener_tester import FileProgressListenerTester\n\n\n@pytest.mark.parametrize(\"data\", [b\"\", b\"Hello World\"])\n@pytest.mark.parametrize(\"block_size\", [1, 5, 10])\ndef test_copy(data: bytes, block_size: int, tmp_path: Path):\n\n    source = tmp_path / \"file.ext\"\n    target = tmp_path / \"target.ext\"\n\n    source.write_bytes(data)\n\n    progress_listener = FileProgressListenerTester(1)\n\n    copied_byte_count = copy_file(source, target, block_size=block_size, progress_listener=progress_listener)\n\n    progress_listener.assert_consistent_run()\n\n    assert target.read_bytes() == data\n    assert copied_byte_count == len(data)\n    assert progress_listener.last_size == len(data)\n\n\n@pytest.mark.parametrize(\"relative_path\", [\"\", \".\", \"..\", \"non-existent\", \"a-dir\", \"a-dir-symlink\", \"a-file-symlink\"])\ndef test_invalid_source_path(relative_path: str, tmp_path: Path):\n\n    (tmp_path / \"a-file\").touch()\n    (tmp_path / \"a-dir\").mkdir()\n\n    (tmp_path / \"a-dir-symlink\").symlink_to(tmp_path / \"a-dir\")\n    (tmp_path / \"a-file-symlink\").symlink_to(tmp_path / \"a-file\")\n\n    with pytest.raises(ValueError):\n\n        copy_file(tmp_path / relative_path, tmp_path / \"target\")\n\n\ndef test_already_existing_target_path(tmp_path: Path):\n\n    source = tmp_path / \"file.ext\"\n    target = tmp_path / \"target.ext\"\n\n    source.touch()\n    target.touch()\n\n    with pytest.raises(ValueError):\n\n        copy_file(source, 
target)\n\n\n@pytest.mark.parametrize(\"block_size\", [0, -1, -5])\ndef test_invalid_block_size(block_size: int, tmp_path: Path):\n\n    source = tmp_path / \"file.ext\"\n    target = tmp_path / \"target.ext\"\n\n    source.touch()\n\n    with pytest.raises(ValueError):\n\n        copy_file(source, target, block_size=block_size)\n","repo_name":"arnegroskurth/pyfcopy","sub_path":"pyfcopy_test/functional/test_copy_file.py","file_name":"test_copy_file.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"7307193411","text":"# coding=utf-8\nimport pandas as pd\nimport xgboost as xgb\nimport time\nfrom sklearn import metrics\nimport pickle\nimport warnings\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedKFold,KFold\nfrom sklearn.metrics import mean_squared_error\nwarnings.filterwarnings(\"ignore\")\n\n\ntrain_file = './data/training2.pkl'\ndata_set = pickle.loads(open(train_file,'rb').read(),encoding='iso-8859-1')\ndata_set.fillna(0.,inplace=True)\n\nlabel = data_set['label'].values  # ndarray\n\nfeature_list = list(data_set.columns)\nfeature_list.remove('uid')\nfeature_list.remove('label')\n\ntraining = data_set[feature_list].values\n\ntest_data = pickle.load(open('./data/test.pkl','rb'),encoding='iso-8859-1')\ntest_data.fillna(0.,inplace=True)\nsub_df = test_data['uid'].copy()\n\n# Train the model and predict the results\n\ntest_data = test_data.values\ndtest=xgb.DMatrix(test_data)\n\nkf = KFold(n_splits=5, random_state=2017, shuffle=True)\nrmse_list = []\nsub_pred = []\nstart = time.time()\nfor train_index, val_index in kf.split(training):\n    X_train, y_train, X_val, y_val = training[train_index], label[train_index], training[val_index], label[val_index]\n\n    params = {\n        'booster': 'gbtree',\n        'objective': 'reg:linear',\n        'eval_metric': 'rmse',\n        'eta': 0.08,\n        'num_round': 500, #300\n        'max_depth': 3,\n        'nthread': -1,\n        'seed': 888,\n        'silent': 1,\n        'lambda':1500,\n        'min_child_weight': 4\n    }\n    dtrain = xgb.DMatrix(X_train, label=y_train)\n    dval = xgb.DMatrix(X_val, label=y_val)\n    watchlist = [(dval, 'val_x'), (dtrain, 'train_x')]\n    model = xgb.train(params, dtrain, num_boost_round=50, evals=watchlist)\n\n    # Predict on the held-out fold (the validation split above can also be used for parameter tuning)\n    y_pred = model.predict(dval, ntree_limit=model.best_ntree_limit)\n    rmse = mean_squared_error(y_val, y_pred) ** 0.5\n    MAE = sum(abs(y_val - y_pred)) / len(y_val)  # mean absolute error\n    print(\"rmse:\", rmse)\n    print(\"MAE:\",MAE)\n    end = time.time()\n    print (end - start)\n\n    rmse_list.append(rmse)\n\n","repo_name":"yirentian/JDD_loan_forcasting","sub_path":"xgboost1_fix.py","file_name":"xgboost1_fix.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"41772388011","text":"#!/usr/bin/python\n\"\"\"\nPyZinc examples\n\nThis Source Code Form is subject to the terms of the Mozilla Public\nLicense, v. 2.0. 
If a copy of the MPL was not distributed with this\nfile, You can obtain one at http://mozilla.org/MPL/2.0/.\n\"\"\"\nfrom opencmiss.zinc.context import Context\n\n# main start\ndef main():\n '''\n The entry point for the application, handle application arguments.\n '''\n # Create the context\n context = Context(\"image\")\n \n # Name of the file we intend to read in.\n image_name = 'drawing.png'\n \n # Get a handle to the root region\n default_region = context.getDefaultRegion()\n \n # The field module allows us to create a field image to \n # store the image data into.\n field_module = default_region.getFieldmodule()\n \n # Create an image field, we don't specify the domain here for this\n # field even though it is a source field. A temporary xi source field\n # is created for us.\n image_field = field_module.createFieldImage()\n image_field.setName('texture')\n \n # Create a stream information object that we can use to read the \n # image file from the disk\n stream_information = image_field.createStreaminformationImage()\n # Set the format for the image we want to read\n stream_information.setFileFormat(stream_information.FILE_FORMAT_PNG)\n # We are reading in a file from the local disk so our resource is a file.\n stream_information.createStreamresourceFile(image_name)\n \n # Actually read in the image file into the image field.\n ret = image_field.read(stream_information)\n if ret == 1: # CMISS_OK has the literal value 1\n print('Image successfully read into image field.')\n else:\n print('Error: failed to read image into image field.')\n \n# main end\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"maierbn/documentation","sub_path":"tutorials/image_reader/image_reader.py","file_name":"image_reader.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"71160467145","text":"#! 
/usr/bin/env python \n# -*- coding:utf-8 -*-\nimport os\n\nimport requests\nimport html2text\nfrom lxml import etree\n\nclass ParseChapter:\n def __init__(self):\n self.base_Url='http://www.guoxue123.cn/zhibu/0101/01xs/'\n self.encoding=''\n self.headers = {\n 'user-agent':\"Mozilla/5.0 (Windows NT 6.1; Win64; x64)\" \"AppleWebKit/537.36 (KHTML, like Gecko)\" \"Chrome/68.0.3440.106\" \"Safari/537.36\"\n }\n def setEncoding(self,req):\n encoding=''\n if req.encoding == 'ISO-8859-1':\n encodings = requests.utils.get_encodings_from_content(req.text)\n if encodings:\n encoding = encodings[0]\n else:\n encoding = req.apparent_encoding\n if encoding=='gb2312':\n encoding='gbk'\n else:\n encoding=req.encoding\n self.encoding=encoding\n req.encoding=encoding\n\n def start(self):\n req=requests.get(self.base_Url+'index.htm',headers=self.headers)\n self.setEncoding(req)\n html_tree=etree.HTML(req.text)\n div=html_tree.xpath('/html/body/div[2]/table')[0]\n links=div.xpath('.//a')\n index=0\n for link in links:\n index+=1\n link_text=link.xpath('./text()')[0]\n print(link_text)\n link_href=link.xpath('./@href')[0]\n text=self.parseChapterPage(link_href)\n file_dir='D:\\\\OneNoteTempFile\\\\新书'\n if not os.path.exists(file_dir):\n os.mkdir(file_dir)\n file_name=str(index)+link_text+'.txt'\n file_path=file_dir+'\\\\'+file_name\n with open(file_path,'a',encoding='utf-8') as f:\n f.write(text)\n # print(text)\n\n def parseChapterPage(self,chapterUrl):\n\n req=requests.get(self.base_Url+chapterUrl,headers=self.headers)\n req.encoding=self.encoding\n # print(req.text)\n html_tree=etree.HTML(req.text)\n div=html_tree.xpath('/html/body/div[2]/table')[0]\n h=html2text.HTML2Text()\n h.ignore_links=True\n text=h.handle(etree.tostring(div, pretty_print=True).decode())\n return text\n\n\npc=ParseChapter()\npc.start()","repo_name":"HuangZhenchao/python","sub_path":"py3-htmlParse/test-html2text.py","file_name":"test-html2text.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8097491286","text":"import pandas as pd\r\nimport numpy as np\r\nimport lightgbm as lgb\r\nimport joblib\r\nimport gc\r\nfrom scipy.sparse import csr_matrix\r\nfrom sklearn import preprocessing, metrics\r\n\r\n# Custom Evaluation metric: Incorporated from Tsuru's (girmdshinsei) kernal\r\n# https://www.kaggle.com/girmdshinsei/for-japanese-beginner-with-wrmsse-in-lgbm\r\n\r\nNUM_ITEMS, DAYS_PRED = 30490, 28 #Values & days to predict from submission file.\r\n\r\n# Load some old data for faster execution\r\nsales_train_val = pd.read_pickle(\"./data/sales_train_evaluation_df.pkl.compress\", compression=\"gzip\")\r\nproduct = sales_train_val[['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']].drop_duplicates()\r\ndata = pd.read_pickle(\"./data/m5_feature_engg.pkl.compress\", compression=\"gzip\")\r\n\r\nweight_mat = np.c_[np.ones([NUM_ITEMS,1]).astype(np.int8), # level 1\r\n pd.get_dummies(product.state_id.astype(str),drop_first=False).astype('int8').values,\r\n pd.get_dummies(product.store_id.astype(str),drop_first=False).astype('int8').values,\r\n pd.get_dummies(product.cat_id.astype(str),drop_first=False).astype('int8').values,\r\n pd.get_dummies(product.dept_id.astype(str),drop_first=False).astype('int8').values,\r\n pd.get_dummies(product.state_id.astype(str) + product.cat_id.astype(str),drop_first=False).astype('int8').values,\r\n pd.get_dummies(product.state_id.astype(str) + 
product.dept_id.astype(str),drop_first=False).astype('int8').values,\r\n pd.get_dummies(product.store_id.astype(str) + product.cat_id.astype(str),drop_first=False).astype('int8').values,\r\n pd.get_dummies(product.store_id.astype(str) + product.dept_id.astype(str),drop_first=False).astype('int8').values,\r\n pd.get_dummies(product.item_id.astype(str),drop_first=False).astype('int8').values,\r\n pd.get_dummies(product.state_id.astype(str) + product.item_id.astype(str),drop_first=False).astype('int8').values,\r\n np.identity(NUM_ITEMS).astype(np.int8) #item :level 12\r\n ].T\r\nweight_mat_csr = csr_matrix(weight_mat)\r\n\r\n\r\ndef weight_calc(data, product):\r\n # calculate the denominator of RMSSE, and calculate the weight base on sales amount\r\n sales_train_val = pd.read_csv('./input/sales_train_evaluation.csv')\r\n d_name = ['d_' + str(i+1) for i in range(1941)]\r\n sales_train_val = weight_mat_csr * sales_train_val[d_name].values\r\n\r\n df_tmp = ((sales_train_val>0) * np.tile(np.arange(1,1942),(weight_mat_csr.shape[0],1)))\r\n start_no = np.min(np.where(df_tmp==0,9999,df_tmp),axis=1)-1\r\n flag = np.dot(np.diag(1/(start_no+1)) , np.tile(np.arange(1,1942),(weight_mat_csr.shape[0],1)))<1\r\n \r\n sales_train_val = np.where(flag,np.nan,sales_train_val)\r\n print('sales_train_val')\r\n print(sales_train_val.shape)\r\n print(sales_train_val)\r\n \r\n weight1 = np.nansum(np.diff(sales_train_val,axis=1)**2,axis=1)/(1941-start_no)\r\n print('weight1')\r\n print(weight1)\r\n\r\n # calculate the sales amount for each item/level\r\n df_tmp = data[(data['date'] > '2016-03-27') & (data['date'] <= '2016-04-24')]\r\n df_tmp['amount'] = df_tmp['demand'] * df_tmp['sell_price']\r\n df_tmp =df_tmp.groupby(['id'])['amount'].apply(np.sum)\r\n df_tmp = df_tmp[product.id].values\r\n\r\n print('df_tmp')\r\n print(df_tmp)\r\n \r\n weight2 = weight_mat_csr * df_tmp\r\n weight2 = weight2/np.sum(weight2)\r\n\r\n print('weight1', weight1)\r\n print('weight2', weight2)\r\n\r\n return weight1, weight2\r\nweight1, weight2 = weight_calc(data, product)\r\n\r\n\r\ndef wrmsse(preds, data):\r\n # this function is calculate for last 28 days to consider the non-zero demand period\r\n \r\n y_true = data.get_label()\r\n y_true = y_true[-(NUM_ITEMS * DAYS_PRED):]\r\n preds = preds[-(NUM_ITEMS * DAYS_PRED):]\r\n num_col = DAYS_PRED\r\n \r\n reshaped_preds = preds.reshape(num_col, NUM_ITEMS).T\r\n reshaped_true = y_true.reshape(num_col, NUM_ITEMS).T\r\n \r\n train = weight_mat_csr*np.c_[reshaped_preds, reshaped_true]\r\n \r\n score = np.sum(\r\n np.sqrt(\r\n np.mean(\r\n np.square(\r\n train[:,:num_col] - train[:,num_col:])\r\n , axis=1) / weight1) * weight2)\r\n \r\n return 'wrmsse', score, False\r\n\r\n\r\ndef run_lgb(data, calendar, prices):\r\n print('\\nRunning lightgbm\\n')\r\n features = [\r\n \"item_id\", \"dept_id\", \"cat_id\", \"store_id\", \"state_id\", \"event_name_1\", \"event_type_1\", \"snap_CA\", \"snap_TX\", \"snap_WI\", \"sell_price\", \\\r\n # demand features.\r\n \"shift_t28\", \"rolling_std_t7\", \"rolling_std_t30\", \"rolling_std_t90\", \"rolling_std_t180\", \"rolling_mean_t7\", \"rolling_mean_t30\", \"rolling_mean_t60\", \\\r\n # price features\r\n \"price_change_t1\", \"price_change_t365\", \"rolling_price_std_t7\", \\\r\n # time features.\r\n \"year\", \"month\", \"dayofweek\", \\\r\n\r\n # \"rolling_mean_t90\", \"rolling_mean_t180\", \"rolling_skew_t30\", \"rolling_kurt_t30\", \"rolling_price_std_t30\", \\\r\n # \"is_month_end\", \"is_month_start\", \"is_weekend\", \"wday\", \\\r\n # \"price_max\", 
\"price_min\", \"price_std\", \"price_mean\", \"price_norm\", \"price_nunique\", \\\r\n        # \"item_nunique\", \"price_momentum\", \"price_momentum_m\", \"price_momentum_y\", \r\n    ]\r\n\r\n    # going to evaluate with the last 28 days\r\n    x_train = data[data['date'] <= '2016-04-24']\r\n    y_train = x_train['demand']\r\n    x_val = data[(data['date'] > '2016-04-24') & (data['date'] <= '2016-05-22')]\r\n    y_val = x_val['demand']\r\n    test = data[(data['date'] > '2016-05-22')]\r\n\r\n    print('\\nPrint values for one of the entries')\r\n    print(data[data.id == 'FOODS_3_090_CA_3_evaluation'][['id', 'demand']].head())\r\n\r\n    del data\r\n    gc.collect()\r\n    \r\n    params = {\r\n        # 'boosting_type': 'gbdt',\r\n        'metric': 'rmse',\r\n        'objective': 'poisson',\r\n        'n_jobs': -1,\r\n        'seed': 20,\r\n        'learning_rate': 0.1,\r\n        'alpha': 0.1,\r\n        'lambda': 0.1,\r\n        'bagging_fraction': 0.66,\r\n        'bagging_freq': 2, \r\n        'colsample_bytree': 0.77\r\n    }\r\n\r\n    train_set = lgb.Dataset(x_train[features], y_train)\r\n    val_set = lgb.Dataset(x_val[features], y_val)\r\n    \r\n    del x_train, y_train\r\n\r\n    # model = lgb.train(params, train_set, num_boost_round = 5, early_stopping_rounds = 5\r\n    #     , valid_sets = [train_set, val_set], verbose_eval = 1, feval=wrmsse)\r\n    model = lgb.train(\r\n        params, train_set, num_boost_round = 1000, early_stopping_rounds = 250,\r\n        valid_sets = [train_set, val_set], verbose_eval = 20\r\n    )\r\n\r\n    # model = lgb.train(\r\n    #     params, train_set, num_boost_round = 1000, early_stopping_rounds = 200, \r\n    #     valid_sets = [train_set, val_set], verbose_eval = 20, feval=wrmsse\r\n    # )\r\n    print('Saving model\\n')\r\n    joblib.dump(model, './data/lgbm_0.sav')\r\n    # m = joblib.load('./data/lgbm_0.sav')\r\n    # zipped = zip(features, m.feature_importance())\r\n    # print([(k,v) for k,v in sorted(zipped, key=lambda x: x[1])])\r\n    \r\n    val_pred = model.predict(x_val[features], num_iteration=model.best_iteration)\r\n    val_score = np.sqrt(metrics.mean_squared_error(val_pred, y_val))\r\n    print('val_pred', val_pred)\r\n    print('val_score', val_score)\r\n    print(f'Our val rmse score is {val_score}')\r\n    y_pred = model.predict(test[features], num_iteration=model.best_iteration)\r\n    test['demand'] = y_pred\r\n\r\n    return test\r\n","repo_name":"htts001/M5_forecasting_accuracy","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"24903979057","text":"import os\nfrom dotenv import load_dotenv\n\nimport requests\n\nload_dotenv()\n\nclass TestCourses:\n    headers = {'Authorization': f'Token {os.environ.get(\"TOKEN_TEST\")}'}\n    url_base_courses = 'http://localhost:8000/api/v2/courses/'\n\n    def test_get_courses(self):\n        response = requests.get(url=self.url_base_courses, headers=self.headers)\n\n        assert response.status_code == 200\n    \n    def test_get_course(self):\n        url = self.url_base_courses + '3/'\n        response = requests.get(url=url, headers=self.headers)\n\n        assert response.status_code == 200\n\n    def test_post_course(self):\n        new_course = {\n            \"title\": \"ruby course 3\",\n            \"url\": \"http://www.udemy.com/ruby3\"\n        }\n\n        response = requests.post(url=self.url_base_courses, headers=self.headers, data=new_course)\n\n        assert response.status_code == 201\n        assert response.json()['title'] == new_course['title']\n    \n    def test_put_course(self):\n        updated_course = {\n            \"title\": \"new ruby course\",\n            \"url\": \"http://www.udemy.com/newruby\"\n        }\n\n        url= self.url_base_courses + '2/'\n\n        response = 
requests.put(url=url, headers=self.headers, data=updated_course)\n\n        assert response.status_code == 200\n        assert response.json()['title'] == updated_course['title']\n\n    def test_delete_course(self):\n        url = self.url_base_courses + '8/'\n        response = requests.delete(url=url, headers=self.headers)\n\n        assert response.status_code == 204 and len(response.text) == 0\n","repo_name":"daviromao/school","sub_path":"tests_pytest.py","file_name":"tests_pytest.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4971541906","text":"import pandas as pd\nfrom TopoPyScale import topoclass as tc\nfrom matplotlib import pyplot as plt\nfrom TopoPyScale import topo_sim as sim\n\n\n\n# download era5 (should read the ini to supply parameters. Area from a DEM or polygon?\n# sparse points method?\n#era5.retrieve_era5(product=\"reanalysis\", startDate=\"2020-01-01\", endDate=\"2020-01-31\", eraDir=\"/home/joel/sim/topoPyscale_paiku/inputs/climate/\",latN=29.375, latS=28.125, lonW=85.125, lonE=86.375, step=1, num_threads=10, surf_plev='surf', plevels=None)\n#plev=[600, 650, 700, 750, 775, 800, 825, 850, 875, 900, 925, 950, 975, 1000 ]\n#era5.retrieve_era5(product=\"reanalysis\", startDate=\"2020-01-01\", endDate=\"2020-01-31\", eraDir=\"/home/joel/sim/topoPyscale_paiku/inputs/climate/\",latN=29.375, latS=28.125, lonW=85.125, lonE=86.375, step=1, num_threads=10, surf_plev='plev', plevels=plev)\n\n\n# ========= STEP 1 ==========\n# Load Configuration\nconfig_file = './config.ini'\nmp = tc.Topoclass(config_file)\n\n# Compute parameters of the DEM (slope, aspect, sky view factor)\nmp.compute_dem_param()\n\n# ========== STEP 2 ===========\n# Extract DEM parameters for points of interest (centroids or physical points)\n\n# ----- Option 1:\n# Compute clustering of the input DEM and extract cluster centroids\nmp.extract_topo_param()\n# plot clusters\nmp.toposub.plot_clusters_map()\nmp.toposub.write_landform()\n# plot sky view factor\n# mp.toposub.plot_clusters_map(var='svf', cmap=plt.cm.viridis)\n\n# ------ Option 2:\n# indicate in the config file the .csv file containing a list of point coordinates (!!! must be in the same coordinate system as the DEM !!!)\n# mp.extract_pts_param(method='linear',index_col=0)\n\n# ========= STEP 3 ==========\n# compute solar geometry and horizon angles\nmp.compute_solar_geometry()\nmp.compute_horizon()\n\n# ========= STEP 4 ==========\n# Perform the downscaling\nmp.downscale_climate()\n\n# ========= STEP 5 ==========\n# explore the downscaled dataset. 
For instance the temperature difference between each point and the first one\n#(mp.downscaled_pts.t-mp.downscaled_pts.t.isel(point_id=0)).plot()\n#plt.show()\n\n# ========= STEP 6 ==========\n# Export output to desired format\n# mp.to_netcdf()\nmp.to_fsm()\n\n# ========= STEP 7 ===========\n# Simulate FSM\nfor i in range(mp.config.n_clusters):\n nsim = \"{:0>2}\".format(i)\n sim.fsm_nlst(31, \"./outputs/FSM_pt_\"+ nsim +\".txt\", 24)\n sim.fsm_sim(\"./fsm_sims/nlst_FSM_pt_\"+ nsim +\".txt\", \"./FSM\")\n\n# extract GST results(7)\ndf = sim.agg_by_var_fsm(7)\n\n# extraxt timeseries average\ndf_mean = sim.timeseries_means_period(df, mp.config.start_date, mp.config.end_date)\n\n# map to domain grid\nsim.topo_map(df_mean)\n\n\n","repo_name":"ArcticSnow/TopoPyScale_examples","sub_path":"ex3_switzerland_davos/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"40061878882","text":"import MetaTrader5 as mt5\nimport datetime\nimport pytz\nModel_type = 'v4_CNN+GRU'\nsymbol = \"EURUSD\"\ntimeframe = mt5.TIMEFRAME_M1\ntime_series = 5\nDebug = False\n\ntimezone = pytz.utc\nutc_from = datetime.datetime(2022, 10, 1, tzinfo=timezone)","repo_name":"onesmus1024/mt5_EA_v4","sub_path":"mt5_global/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73426324745","text":"from PyQt5.QtWidgets import QApplication, QDialog, QVBoxLayout, QLabel\nfrom PyQt5.QtGui import QIcon, QPixmap\nimport sys\n\n\nclass Window(QDialog):\n def __init__(self):\n super().__init__()\n self.title = \"PyQt5 Adding Image To Label\"\n self.icon = \"icon.png\"\n self.top = 200\n self.left = 500\n self.width = 400\n self.height = 300\n self.init_window()\n\n def init_window(self):\n self.setWindowTitle(self.title)\n self.setWindowIcon(QIcon(self.icon))\n self.setGeometry(self.left, self.top, self.width, self.height)\n vbox = QVBoxLayout()\n labelImage = QLabel('Python image', self)\n pixmap = QPixmap('pic.png')\n labelImage.setPixmap(pixmap)\n vbox.addWidget(labelImage)\n self.setLayout(vbox)\n self.show()\n\n\nif __name__ == '__main__':\n App = QApplication(sys.argv)\n window = Window()\n sys.exit(App.exec())","repo_name":"xietx1995/pyqt5-code-snippets","sub_path":"06_image/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74799473226","text":"from match import Match\nimport sys\nimport chess.pgn\nimport io\n\npromotions = {\n 'q': 'queen',\n 'r': 'rook',\n 'b': 'bishop',\n 'n': 'knight'\n}\n\ndef main(args):\n for arg in args:\n pgn = open(arg)\n game = chess.pgn.read_game(pgn)\n while game is not None:\n if not game.errors:\n add_game(game)\n game = chess.pgn.read_game(pgn)\n\n\ndef add_game(game):\n match = Match()\n board = game.board()\n for move in game.mainline_moves():\n strmove = str(move)\n p = ''\n ox = 'abcdefgh'.find(strmove[0])\n oy = '87654321'.find(strmove[1])\n nx = 'abcdefgh'.find(strmove[2])\n ny = '87654321'.find(strmove[3])\n if len(str(move)) > 4:\n p = promotions[strmove[4]]\n\n error = match.move(ox, oy, nx, ny, p)\n if error:\n print(str(match.board))\n print(strmove)\n print(f'Gatekeeper again: {match.board.gatekeeper(ox, oy, nx, ny, False, p, True)}')\n return\n\nif __name__ == '__main__':\n 
main(sys.argv[1:])\n","repo_name":"canozo/sistemas-inteligentes","sub_path":"cchess/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31221277272","text":"## This code was stolen from: https://github.com/chenjie/PyTorch-CIFAR-10-autoencoder\n\n\n# Numpy\nimport numpy as np\n\n# Torch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\n# Torchvision\nimport torchvision\nimport torchvision.transforms as transforms\n\n# Matplotlib\n# %matplotlib inline\nimport matplotlib.pyplot as plt\n\n# OS\nimport os\nimport argparse\n\n# My own implementations\nfrom gaeLDALoss import gae_lda_loss\n\n# Set random seed for reproducibility\nSEED = 87\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(SEED)\n\n\ndef print_model(encoder, decoder):\n print(\"============== Encoder ==============\")\n print(encoder)\n print(\"============== Decoder ==============\")\n print(decoder)\n print(\"\")\n\n\ndef create_model(embDim = 100):\n autoencoder = Autoencoder(embDim=embDim)\n print_model(autoencoder.encoder, autoencoder.decoder)\n if torch.cuda.is_available():\n autoencoder = autoencoder.cuda()\n print(\"Model moved to GPU in order to speed up training.\")\n return autoencoder\n\n\ndef get_torch_vars(x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)\n\ndef imshow(img):\n npimg = img.cpu().numpy()\n plt.axis('off')\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n\n\nclass Autoencoder(nn.Module):\n def __init__(self, embDim=100):\n super(Autoencoder, self).__init__()\n # Input size: [batch, 3, 32, 32]\n # Output size: [batch, 3, 32, 32]\n self.encoder = nn.Sequential(\n nn.Conv2d(3, 12, 4, stride=2, padding=1), # [batch, 12, 16, 16]\n nn.ReLU(),\n nn.Conv2d(12, 24, 4, stride=2, padding=1), # [batch, 24, 8, 8]\n nn.ReLU(),\n\t\t\tnn.Conv2d(24, 48, 4, stride=2, padding=1), # [batch, 48, 4, 4]\n nn.ReLU(),\n# \t\t\tnn.Conv2d(48, 96, 4, stride=2, padding=1), # [batch, 96, 2, 2]\n# nn.ReLU(),\n )\n self.compressor = nn.Sequential(\n nn.Flatten(),\n nn.LazyLinear(out_features=embDim),\n nn.ReLU()\n )\n self.uncompressor = nn.Sequential(\n nn.Linear(in_features=embDim, out_features=768),\n nn.ReLU(),\n nn.Unflatten(dim=-1, unflattened_size=(48, 4, 4))\n )\n self.decoder = nn.Sequential(\n# nn.ConvTranspose2d(96, 48, 4, stride=2, padding=1), # [batch, 48, 4, 4]\n# nn.ReLU(),\n\t\t\tnn.ConvTranspose2d(48, 24, 4, stride=2, padding=1), # [batch, 24, 8, 8]\n nn.ReLU(),\n\t\t\tnn.ConvTranspose2d(24, 12, 4, stride=2, padding=1), # [batch, 12, 16, 16]\n nn.ReLU(),\n nn.ConvTranspose2d(12, 3, 4, stride=2, padding=1), # [batch, 3, 32, 32]\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n encoded = self.encoder(x)\n compressed = self.compressor(encoded)\n uncompressed = self.uncompressor(compressed)\n decoded = self.decoder(uncompressed)\n return encoded, decoded\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Train Autoencoder\")\n parser.add_argument(\"--valid\", action=\"store_true\", default=False,\n help=\"Perform validation only.\")\n parser.add_argument(\"-e\", \"--epochs\", default=1, type=int,\n help='Number of epochs to train the AE on [default: 1]')\n parser.add_argument(\"-b\", \"--batchSize\", default=64, type=int,\n help=\"Batch size of the data used for training and testing [default: 64]\")\n 
parser.add_argument(\"-z\", \"--embDim\", default=100, type=int,\n                        help=\"Number of dimensions for the low dimension representation [default: 100]\")\n    args = parser.parse_args()\n\n    print(args.epochs)\n\n\n    numEpochs = args.epochs\n    batchSize = args.batchSize\n    embeddingDim = args.embDim\n\n    # Create model\n    autoencoder = create_model(embDim=embeddingDim)\n\n    # Load data\n    transform = transforms.Compose(\n        [transforms.ToTensor(), ])\n    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n                                            download=True, transform=transform)\n    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batchSize,\n                                              shuffle=True, num_workers=2)\n    testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n                                           download=True, transform=transform)\n    testloader = torch.utils.data.DataLoader(testset, batch_size=batchSize,\n                                             shuffle=False, num_workers=2)\n    classes = ('plane', 'car', 'bird', 'cat',\n               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n    if args.valid:\n        print(\"Loading checkpoint...\")\n        autoencoder.load_state_dict(torch.load(\"./models/autoencoder.pkl\"))\n        dataiter = iter(testloader)\n        images, labels = next(dataiter)\n        print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(16)))\n        imshow(torchvision.utils.make_grid(images))\n\n        images = get_torch_vars(images)\n\n        decoded_imgs = autoencoder(images)[1]\n        imshow(torchvision.utils.make_grid(decoded_imgs.data))\n\n        exit(0)\n\n    # Define an optimizer and criterion\n    optimizer = optim.Adam(autoencoder.parameters())\n\n    for epoch in range(numEpochs):\n\n        running_loss = 0.0\n        for i, (inputs, labels) in enumerate(trainloader, 0):\n            inputs = get_torch_vars(inputs)\n\n            # ============ Forward ============\n            encoded, outputs = autoencoder(inputs)\n\n            # ============ LDA loss ============\n            ldaLoss = gae_lda_loss(inputs=inputs, outputs=outputs, labels=labels)\n\n            # ============ Backward ============\n            optimizer.zero_grad()\n            ldaLoss.backward()\n            optimizer.step()\n\n            # ============ Logging ============\n            running_loss += ldaLoss.data\n            if i % 2000 == 1999:\n                print('[%d, %5d] loss: %.3f' %\n                      (epoch + 1, i + 1, running_loss / 2000))\n                running_loss = 0.0\n        \n        print('Finished epoch {}'.format(str(epoch)))\n\n    print('Finished Training')\n    print('Saving Model...')\n    if not os.path.exists('./models'):\n        os.mkdir('./models')\n    torch.save(autoencoder.state_dict(), \"./models/gae_autoencoder.pkl\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"arickGrootveld/GV_AutoencoderProject","sub_path":"gae.py","file_name":"gae.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"44506662893","text":"import bz2\nimport gzip\nimport logging\nimport lzma\nimport math\nimport multiprocessing as mp\nimport os\nimport platform\nimport resource\nimport signal\nimport subprocess as sp\nimport sys\nimport time\nimport glob\nfrom collections import Counter\nfrom enum import Enum, auto\nimport pickle\nimport numpy as np\nimport psutil\nfrom Bio import SeqIO\nfrom Bio.SeqFeature import SeqFeature, FeatureLocation\nimport pandas as pd\nfrom BCBio import GFF\nfrom dna_features_viewer import BiopythonTranslator\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.use('Agg')\nlogging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO,datefmt=\"%d-%b-%y %H:%M:%S\")\n\n#Most things are shamelessly copied from checkv\n\n\nclass Compression(Enum):\n    gzip = auto()\n    bzip2 = auto()\n    
xz = auto()\n noncompressed = auto()\n\n\ndef max_mem_usage():\n \"\"\"Return max mem usage (GB) of self and child processes\"\"\"\n max_mem_self = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n max_mem_child = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss\n if platform.system() == \"Linux\":\n return (max_mem_self + max_mem_child) / float(1e6)\n else:\n return (max_mem_self + max_mem_child) / float(1e9)\n\n\ndef is_compressed(filepath):\n \"\"\"Checks if a file is compressed (gzip, bzip2 or xz)\"\"\"\n with open(filepath, \"rb\") as fin:\n signature = fin.peek(8)[:8]\n if tuple(signature[:2]) == (0x1F, 0x8B):\n return Compression.gzip\n elif tuple(signature[:3]) == (0x42, 0x5A, 0x68):\n return Compression.bzip2\n elif tuple(signature[:7]) == (0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00, 0x00):\n return Compression.xz\n else:\n return Compression.noncompressed\n\n\ndef get_compressed_file_handle(path):\n filepath_compression = is_compressed(path)\n if filepath_compression == Compression.gzip:\n f = gzip.open(path, \"rt\")\n elif filepath_compression == Compression.bzip2:\n f = bz2.open(path, \"rt\")\n elif filepath_compression == Compression.xz:\n f = lzma.open(path, \"rt\")\n else:\n f = open(path, \"r\")\n return f\n\n\ndef get_logger_old(quiet):\n if not quiet:\n logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n else:\n logging.basicConfig(level=logging.WARNING, format=\"%(message)s\")\n return logging.getLogger()\n\n\ndef get_logger(quiet):\n logger = logging.getLogger(__name__)\n if not quiet:\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.WARNING)\n formatter = logging.Formatter(fmt=\"%(message)s\")\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.handlers.clear()\n logger.addHandler(stream_handler)\n return logger\n\n\ndef check_fasta(path, tmp_dir):\n checkpoint_file = os.path.join(tmp_dir, \"input_validation_checkpoint\")\n if not os.path.isfile(checkpoint_file):\n f = get_compressed_file_handle(path)\n fasta_parser = SeqIO.parse(f, \"fasta\")\n if not any(fasta_parser):\n f.close()\n sys.stderr.write(\"You input FASTA file is empty or not properly formatted.\")\n sys.exit()\n else:\n f = get_compressed_file_handle(path)\n fasta_parser = SeqIO.parse(f, \"fasta\")\n seq_id_counter = Counter([record.id for record in fasta_parser])\n f.close()\n repeated_seq_ids = [i for i, j in seq_id_counter.items() if j > 1]\n if repeated_seq_ids:\n sys.stderr.write(\n f\"Please remove duplicated sequence IDs from the input FASTA file: {', '.join(repeated_seq_ids)}\"\n )\n sys.exit()\n else:\n with open(checkpoint_file, \"w\") as fout:\n pass\n\n\ndef check_executables(requirements):\n fails = 0\n for program in requirements:\n found = False\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path.strip('\"'), program)\n if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):\n found = True\n break\n if not found:\n msg = f\"Error: required program '{program}' not executable or not found on $PATH\\n\"\n sys.stderr.write(msg)\n fails += 1\n if fails > 0:\n sys.exit()\n\n\ndef init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n\ndef terminate_tree(pid, including_parent=True):\n parent = psutil.Process(pid)\n for child in parent.children(recursive=True):\n child.terminate()\n if including_parent:\n parent.terminate()\n\n\ndef async_parallel(function, argument_list, threads):\n \"\"\"Based on: https://gist.github.com/admackin/003dd646e5fadee8b8d6\"\"\"\n # threads = 
len(argument_list) ## why is this being defined again here?\n    pool = mp.Pool(threads, init_worker)\n    try:\n        results = []\n        for arguments in argument_list:\n            p = pool.apply_async(function, args=arguments)\n            results.append(p)\n        pool.close()\n        while True:\n            if all(r.ready() for r in results):\n                return [r.get() for r in results]\n            time.sleep(1)\n    except KeyboardInterrupt:\n        # when you want to kill everything, including this program\n        # https://www.reddit.com/r/learnpython/comments/7vwyez/how_to_kill_child_processes_when_using/dtw3oh4/\n        pid = os.getpid()\n        terminate_tree(pid)\n\n\ndef check_database(dbdir):\n    \"\"\"check existence of database blastp, diamond and hmm files\"\"\"\n    if dbdir is None:\n        if \"COILDB\" not in os.environ:\n            msg = \"Error: database dir not specified\\nUse -d or set COILDB environmental variable\"\n            sys.exit(msg)\n        else:\n            dbdir = os.environ[\"COILDB\"]\n    dbdir = os.path.abspath(dbdir)\n    if not os.path.exists(dbdir):\n        msg = f\"Error: database dir not found '{dbdir}'\"\n        sys.exit(msg)\n    files = [\n        \"blastdb/phrogs.*\",\n        \"diamonddb/phrogs.*\",\n        \"hmmdb/phrogs_with_annot*\"\n    ]\n    for f in files:\n        path = os.path.join(dbdir, f)\n        if not glob.glob(path):\n            msg = f\"Error: database file not found '{path}'\"\n            sys.exit(msg)\n    return dbdir\n\n\ndef read_fasta(path):\n    \"\"\"Read fasta file and yield (header, sequence)\"\"\"\n    filepath_compression = is_compressed(path)\n    if filepath_compression == Compression.gzip:\n        f = gzip.open(path, \"rt\")\n    elif filepath_compression == Compression.bzip2:\n        f = bz2.open(path, \"rt\")\n    elif filepath_compression == Compression.xz:\n        f = lzma.open(path, \"rt\")\n    else:\n        f = open(path, \"r\")\n    for record in SeqIO.parse(f, \"fasta\"):\n        name = record.description\n        seq = str(record.seq).upper()\n        if name != \"\" and seq != \"\":\n            yield name, seq\n    f.close()\n\n\n\ndef run_prodigal(out):\n    cmd = \"prodigal-gv \"\n    cmd += \" -m \"\n    cmd += \"-p meta \"\n    cmd += f\"-i {out}.fna \"\n    cmd += f\"-a {out}.faa \"\n    cmd += f\"-f gff \"\n    cmd += f\"-o {out}.gff \"\n    cmd += \"1> /dev/null \"\n    cmd += f\"2> {out}.log\"\n    with open(f\"{out}.cmd\", \"w\") as file:\n        file.write(cmd + \"\\n\")\n    p = sp.Popen(cmd, shell=True)\n    return_code = p.wait()\n    return return_code == 0\n\ndef run_trnascan(out):\n    cmd = \"tRNAscan-SE \"\n    cmd += \" -G \"\n    cmd += \"-p meta \"\n    cmd += f\"-o {out}_trnascan.tsv \"\n    cmd += f\"-i {out}.fna \"\n    cmd += \"1> /dev/null \"\n    cmd += f\"2> {out}.log\"\n    with open(f\"{out}.cmd\", \"w\") as file:\n        file.write(cmd + \"\\n\")\n    p = sp.Popen(cmd, shell=True)\n    return_code = p.wait()\n    return return_code == 0\n\ndef run_diamond(out, db, faa, tmp, threads):\n    cmd = \"diamond blastp \"\n    cmd += \"--outfmt 6 \"\n    cmd += \"--evalue 1e-5 \"\n    cmd += \"--query-cover 50 \"\n    cmd += \"--subject-cover 50 \"\n    cmd += \"-k 10000 \"\n    cmd += f\"--query {faa} \"\n    cmd += f\"--db {db} \"\n    cmd += f\"--threads {threads} \"\n    cmd += f\"> {out} \"\n    cmd += f\"2> {tmp}.log\"\n    with open(f\"{tmp}.cmd\", \"w\") as file:\n        file.write(cmd + \"\\n\")\n    p = sp.Popen(cmd, shell=True)\n    return_code = p.wait()\n    return return_code == 0\n\ndef run_ffindex_build(out,faa_dir):\n    '''faa_dir must contain individual .faa files'''\n    cmd = \"ffindex_build \"\n    cmd += \"-s \"\n    cmd += f\"{out}.ffdata \"\n    cmd += f\"{out}.ffindex \"\n    cmd += f\"{faa_dir} \"\n    cmd += f\"2> {out}.log \"\n    with open(f\"{out}.cmd\", \"w\") as file:\n        file.write(cmd + \"\\n\")\n    p = sp.Popen(cmd, shell=True)\n    return_code = p.wait()\n    return return_code == 0\n\ndef 
run_ffindex_from_fasta(out,faa):\n '''expect input to be a single multi faa file'''\n cmd = \"ffindex_from_fasta \"\n cmd += \"-s \"\n cmd += f\"{out}.ffdata \"\n cmd += f\"{out}.ffindex \"\n cmd += f\"{faa} \"\n cmd += f\"2> {out}.ffindex.log \"\n with open(f\"{out}.ffindex.cmd\", \"w\") as file:\n file.write(cmd + \"\\n\")\n p = sp.Popen(cmd, shell=True)\n return_code = p.wait()\n return return_code == 0 \n\ndef run_hhblits_omp(out, db, faa,threads=0, evalue=0.001):\n #-i ./ffindex/phage_msa -d ./tmp/phage -e 0.001 -cpu 12 -z 3 -Z 3 -b 0 -B 0 -v 1 -M 50 -o outfile\n cmd = \"hhblits_omp \"\n cmd += \"-i \"\n cmd += f\"{faa} \"\n cmd += \"-d \"\n cmd += f\"{db} \"\n cmd += f\"-e {evalue} \"\n cmd += f\"-blasttab {out}.tbl \"\n cmd += f\"-cpu {threads} \"\n cmd += f\"-z 3 \"\n cmd += f\"-Z 3 \"\n cmd += f\"-b 0 \"\n cmd += f\"-B 0 \"\n cmd += \"-M 50 \"\n cmd += f\"2> {out}.log \"\n with open(f\"{out}.cmd\", \"w\") as file:\n file.write(cmd + \"\\n\")\n p = sp.Popen(cmd, shell=True)\n return_code = p.wait()\n return return_code == 0\n\n\ndef run_hmmsearch(out, db, faa, threads=2, evalue=10):\n cmd = \"hmmsearch \"\n cmd += \"--noali \"\n cmd += \"-o /dev/null \"\n cmd += f\"-E {evalue} \"\n cmd += f\"--tblout {out} \"\n cmd += f\"--cpu {threads} \"\n cmd += f\"{db} \"\n cmd += f\"{faa} \"\n cmd += f\"2> {out}.log \"\n with open(f\"{out}.cmd\", \"w\") as file:\n file.write(cmd + \"\\n\")\n p = sp.Popen(cmd, shell=True)\n return_code = p.wait()\n return return_code == 0\n\ndef search_hmms(tmp_dir,prefix,threads, db_dir):\n # make tmp\n hmm_dir = os.path.join(tmp_dir, f\"{prefix}_hmmsearch\")\n if not os.path.exists(hmm_dir):\n os.makedirs(hmm_dir)\n # list faa files\n faa = [\n file\n for file in os.listdir(os.path.join(tmp_dir, \"proteins\"))\n if file.split(\".\")[-1] == \"faa\"\n ]\n # list splits to process\n # splits = []\n # for file in os.listdir(db_dir):\n # split = file.split(\".\")[0]\n # out = os.path.join(hmm_dir, f\"{split}.hmmout\")\n # # file doesn't exist; add to list for processing\n # if not os.path.exists(out):\n # splits.append(split)\n # # check if file is complete\n # else:\n # x = False\n # with open(out) as subf:\n # for line in subf:\n # if line == \"# [ok]\\n\":\n # x = True\n # if not x:\n # splits.append(split)\n # run hmmer\n # print(splits)\n logging.info('running hmmsearch')\n args_list = []\n # for split in splits:\n # out = os.path.join(hmm_dir, f\"{split}.{prefix}.hmmout\")\n # hmmdb = os.path.join(db_dir, f\"{split}.hmm\")\n # faa = os.path.join(tmp_dir, \"proteins.faa\")\n # args_list.append([out, hmmdb, faa])\n for f in faa:\n split=f.split('.')[0]\n out = os.path.join(hmm_dir, f\"proteins.{prefix}.hmmout\")\n hmmdb = db_dir\n faa = os.path.join(tmp_dir, \"proteins.faa\")\n args_list.append([out, hmmdb, faa]) \n results = async_parallel(run_hmmsearch, args_list, threads)\n if not all(results):\n num_fails = len(results) - sum(results)\n sys.exit(\n f\"\\nError: {num_fails} hmmsearch tasks failed. Program should be rerun.\"\n )\n # check outputs are complete\n logging.info('checking the outputs are complete')\n complete = []\n for file in os.listdir(hmm_dir):\n if file.split(\".\")[-1] == \"hmmout\":\n x = False\n with open(os.path.join(hmm_dir, file)) as subf:\n for line in subf:\n if line == \"# [ok]\\n\":\n x = True\n complete.append(x)\n num_fails = complete.count(False)\n if num_fails > 0:\n sys.exit(\n f\"\\nError: {num_fails}/80 hmmsearch tasks failed. 
Program should be rerun.\"\n )\n # cat output\n logging.info('gathering search results')\n with open(os.path.join(tmp_dir, f\"hmmsearch.{prefix}.txt\"), \"w\") as f:\n for file in os.listdir(hmm_dir):\n if file.split(\".\",1)[-1] == f\"{prefix}.hmmout\":\n with open(os.path.join(hmm_dir, file)) as subf:\n for line in subf:\n f.write(line)\n\n\ndef search_hmms_hhsuite(tmp_dir, threads, db_dir):\n # make tmp\n hmm_dir = os.path.join(tmp_dir, \"hhsuite\")\n index_dir = os.path.join(hmm_dir,\"index\")\n hhblits_dir = os.path.join(hmm_dir,\"hhblits\")\n for dir in [hmm_dir,index_dir,hhblits_dir]:\n if not os.path.exists(dir):\n os.makedirs(dir)\n # list faa files\n all_proteins = os.path.join(tmp_dir, \"proteins.faa\")\n # build index\n logging.info('builing index for parallel execution of hhblits')\n index_file = os.path.join(index_dir,\"index\")\n result = run_ffindex_from_fasta(index_file,all_proteins)\n if not result:\n sys.exit(\n logging.error(f\"\\nError: building index for hhblits. Program should be rerun.\")\n )\n # run hhblits\n logging.info('hmmdb search started')\n hhout = os.path.join(hhblits_dir, \"hhblits\")\n result = run_hhblits_omp(out=hhout, db=db_dir, faa=index_file,threads=12, evalue=0.001)\n if not result:\n sys.exit(\n logging.error(f\"\\nError: hhbits failed to run. Program should be rerun.\")\n )\n logging.info('hmmdb search ended')\n out = os.path.join(tmp_dir,'hhblits.tsv')\n #extracting search results\n logging.info('unpacking search results')\n with open(hhout+'.tbl.ffdata','r') as fh, open(out,'w') as wh:\n wh.write('query\\ttarget\\t#match/tLen\\talnLen\\t#mismatch\\t#gapOpen\\tqstart\\tqend\\ttstart\\ttend\\teval\\tscore\\n')\n for line in fh:\n wh.write(line.strip('\\x00'))\n \n\ndef call_genes(in_fna, out_dir, threads,trna=True):\n # make tmp dir\n logging.info('gene calling started')\n tmp = f\"{out_dir}/tmp/proteins\"\n if not os.path.exists(tmp):\n os.makedirs(tmp)\n # count seqs in fasta\n num_seqs = sum(1 for _ in read_fasta(in_fna))\n # split fna into equal sized chunks\n split_size = int(math.ceil(1.0 * num_seqs / threads))\n iteration = 1\n count = 0\n out = open(os.path.join(tmp, f\"{iteration}.fna\"), \"w\")\n for id, seq in read_fasta(in_fna):\n # check if new file should be opened\n if count == split_size:\n count = 0\n iteration += 1\n out = open(os.path.join(tmp, f\"{iteration}.fna\"), \"w\")\n # write seq to file\n out.write(\">\" + id + \"\\n\" + seq + \"\\n\")\n count += 1\n out.close()\n # call genes\n args_list = []\n for i in range(1, iteration + 1):\n out = os.path.join(tmp, str(i))\n args_list.append([out])\n results = async_parallel(run_prodigal, args_list, threads)\n if not all(results):\n num_fails = len(results) - sum(results)\n sys.exit(\n logging.error(f\"\\nError: {num_fails} prodigal tasks failed. Program should be rerun.\")\n )\n if trna:\n logging.info('calling trna genes')\n args_list = []\n for i in range(1, iteration + 1):\n out = os.path.join(tmp, str(i))\n args_list.append([out])\n results = async_parallel(run_trnascan, args_list, threads)\n if not all(results):\n num_fails = len(results) - sum(results)\n sys.exit(\n logging.error(f\"\\nError: {num_fails} tRNAscan-SE tasks failed. 
Program should be rerun.\")\n )\n\n # cat output faa\n # mapping = dict()\n with open(f\"{tmp}.faa\", \"w\") as f:\n for i in range(1, iteration + 1):\n # avoid trying to read empty fasta file\n if i <= threads:\n with open(os.path.join(tmp, f\"{i}.faa\")) as subf:\n j = 0\n for line in subf:\n #if line[0] == '>':\n #j += 1\n #linex = line.split('cov')[0] + f'{j}\\n'\n #mapping[line] = linex\n f.write(line)\n # with open(f'{tmp}.pkl', 'wb') as f:\n # pickle.dump(mapping, f)\n\n #cat output gff\n with open(f\"{tmp}.gff\", \"w\") as f:\n f.write('##gff-version 3\\n')\n for i in range(1, iteration + 1):\n # avoid trying to read empty fasta file\n if i <= threads:\n with open(os.path.join(tmp, f\"{i}.gff\")) as subf:\n j = 0\n for index,line in enumerate(subf):\n if index > 0:\n f.write(line)\n #cat output trnascan\n with open(f\"{tmp}_trnascan.tsv\", \"w\") as f:\n for i in range(1, iteration + 1):\n # avoid trying to read empty fasta file\n if i <= threads:\n with open(os.path.join(tmp, f\"{i}_trnascan.tsv\")) as subf:\n j = 0\n for line in subf:\n #if line[0] == '>':\n #j += 1\n #linex = line.split('cov')[0] + f'{j}\\n'\n #mapping[line] = linex\n f.write(line)\n\n\ndef parse_blastp(path):\n with open(path) as f:\n names = [\n \"qname\",\n \"tname\",\n \"pid\",\n \"aln\",\n \"mis\",\n \"gap\",\n \"qstart\",\n \"qstop\",\n \"tstart\",\n \"tstop\",\n \"eval\",\n \"score\",\n ]\n formats = [str, str, float, int, int, int, int, int, int, int, float, float]\n for line in f:\n values = line.split()\n yield dict([(names[i], formats[i](values[i])) for i in range(12)])\n\n\ndef parse_hmmsearch(path):\n with open(path) as f:\n names = [\n \"qname\",\n \"qacc\",\n \"tname\",\n \"tacc\",\n \"eval\",\n \"score\",\n \"bias\",\n \"beval\",\n \"bscore\",\n \"bbias\",\n ]\n formats = [str, str, str, str, float, float, float, float, float, float]\n for line in f:\n if not line.startswith(\"#\"):\n values = line.split()\n try:\n yield dict([(names[i], formats[i](values[i])) for i in range(10)])\n except:\n print(\"skipping erroneous line\")\n\n#parse tRNAscan-SE output file\ndef parse_trna(path):\n with open(path) as f:\n names = [\n \"qname\",\n \"trna_no\",\n \"begin\",\n \"end\",\n \"trna_type\",\n \"anticodon\",\n \"intron_begin\",\n \"intron_end\",\n \"score\",\n \"bias\",\n ]\n formats = [str, int, int, int, str, str, int, int, float]\n for line in f:\n if not (line.startswith(\"Sequence\") or line.startswith(\"Name\") or line.startswith(\"-----\")):\n values = line.split()\n try:\n yield dict([(names[i], formats[i](values[i])) for i in range(9)])\n except Exception as e:\n print(f\"{e}/;skipping erroneous line\")\ndef get_cordinates(x):\n if x['begin'] > x['end']:\n return pd.Series([x[\"qname\"],x[\"trna_no\"],x['end'],x['begin'],-1,x[\"trna_type\"],x[\"score\"]], index=[\"contig\",\"trna_no\",\"begin\",\"end\",\"strand\",\"trna_type\",\"score\"])\n else:\n return pd.Series([x[\"qname\"],x[\"trna_no\"],x['begin'],x['end'],1,x[\"trna_type\"],x[\"score\"]], index=[\"contig\",\"trna_no\",\"begin\",\"end\",\"strand\",\"trna_type\",\"score\"])\n\ndef create_feature(x):\n qualifiers = {\n \"source\": \"tRNAscan-SE\",\n \"score\": x[\"score\"],\n \"trna_type\": x[\"trna_type\"],\n \"score\" : x[\"score\"],\n \"label\" : x[\"trna_type\"]+\"_tRNA\",\n \"ID\": \"trna_\"+str(x[\"trna_no\"]),\n }\n\n return (SeqFeature(FeatureLocation(x[\"begin\"], x[\"end\"]), type=\"tRNA\",id=str(x[\"trna_no\"]), strand=x[\"strand\"], qualifiers=qualifiers))\n\n\ndef generate_plots(tmp_dir, hmmsearch_dir, trna_dir 
,meta_dir,gff_dir): \n #check if tmp/plots exists, eles create the dir\n \n plots_dir = os.path.join(tmp_dir, \"plots\")\n if not os.path.exists(plots_dir):\n os.makedirs(plots_dir)\n #process hmmsearch results\n logging.info('processing hmm results')\n search_results=pd.DataFrame(parse_hmmsearch(hmmsearch_dir))\n trna=pd.DataFrame(parse_trna(trna_dir))\n if search_results.empty:\n sys.stderr.write('Exiting because hmmsearch returned zero matches!')\n sys.exit( )\n \n if not trna.empty:\n trna=trna.apply(lambda x : get_cordinates(x) , axis=1).sort_values(by=[\"contig\",\"begin\"])\n else:\n trna = None\n\n phrogs_anno=pd.read_table(meta_dir)\n phrogs_anno=phrogs_anno.fillna('unknown function')\n search_results=pd.DataFrame(parse_hmmsearch(hmmsearch_dir))\n phrogs_anno['phrog']=phrogs_anno['phrog'].apply(lambda x : f'phrog_{x}')\n results_with_annotate = search_results.merge(phrogs_anno, how='inner', left_on='tname', right_on='phrog')\n results_with_annotate['position'] = results_with_annotate['qname'].apply(lambda x: int(x.split('_')[-1]))\n results_with_annotate['contig'] = results_with_annotate['qname'].apply(lambda x: x.rsplit('_',1)[0])\n \n results_filtered = results_with_annotate.iloc[results_with_annotate.groupby('qname')['score'].idxmax()].query('score > 50')\n\n logging.info('generating annotation plots')\n gff_out_dir = os.path.join(tmp_dir, \"proteins_annot.gff\")\n with open(gff_out_dir, \"w\") as out_handle:\n \n for i in GFF.parse(gff_dir):\n \n \n tmp = results_filtered.query(f\"contig == '{i.id}'\")\n\n for pos,feature in enumerate(i.features, start=1):\n tmp_feature = tmp.query(f\"position == {pos}\")[[\"category\",\"color\",\"annot\",\"phrog\",\"score\",\"eval\"]]\n if not tmp_feature.empty:\n #print(tmp_feature[\"annot\"])\n feature.qualifiers.update({\"label\":tmp_feature[\"annot\"].values[0]})\n feature.qualifiers.update({\"category\":tmp_feature[\"category\"].values[0]})\n feature.qualifiers.update({\"eval\":tmp_feature[\"eval\"].values[0]})\n feature.qualifiers.update({\"score\":tmp_feature[\"score\"].values[0]})\n feature.qualifiers.update({\"color\":tmp_feature[\"color\"].values[0]})\n feature.qualifiers.update({\"phrog\":tmp_feature[\"phrog\"].values[0]})\n else:\n feature.qualifiers.update({\"label\":\"unknown function\"})\n feature.qualifiers.update({\"color\": \"#c9c9c9\"})\n \n #create trna features\n if trna is not None:\n tmp_trna = trna.query(f\"contig == '{i.id}'\")\n tmp_trna=tmp_trna.reset_index(drop=True)\n if not tmp_trna.empty:\n tmp_trna[\"feature\"]=tmp_trna.apply(lambda x : create_feature(x), axis=1)\n i.features.extend(tmp_trna[\"feature\"].to_list())\n #write updated to gff record to a file\n graphic_record = BiopythonTranslator().translate_record(i)\n GFF.write([i], out_handle)\n for feat in graphic_record.features: #turns off the labels of cds' without annotations\n if feat.label == \"unknown function\":\n feat.label =None\n fig, ax1 = plt.subplots(1, 1, figsize=(15, 4))\n fig.tight_layout(pad=2.5) \n ax, _ = graphic_record.plot(ax=ax1, strand_in_label_threshold=7,annotate_inline=False,figure_height=3 )\n ax.set_title(i.id)\n out_name = os.path.join(plots_dir, f\"{i.id}.png\")\n ax.figure.savefig(out_name, bbox_inches='tight')\n plt.clf()\n plt.close(\"all\")\n\n","repo_name":"Yasas1994/phage_contig_annotator","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":24348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73148551306","text":"import re\n\ndef 
timeToNumber(h, m, s, ss, t):\n    end = int(h) *3600*1000 + int(m)*60*1000 + int(s)*1000 + int(ss)\n    \n    T = int(float(t)*1000)\n    start= end - T + 1\n    if(start<0):\n        return [0, end]\n    else:\n        return [start, end]\n\ndef solution(lines):\n    answer = 0\n    ans = []\n    \n    for line in lines: \n        (h, m, s, ss, t) = re.search('(\\d\\d):(\\d\\d):(\\d\\d).(\\d\\d\\d) (\\d+|\\d+.\\d+)s', line).groups()\n        ans.append(timeToNumber(h, m, s, ss, t))\n    maxi = 0\n    for ind, [start, end] in enumerate(ans):\n        fin = end + 1000\n        cnt = 0\n        for newStart, newEnd in ans[ind:]:\n            if(newStart<fin):\n                cnt += 1\n        maxi = max(maxi, cnt)\n    return maxi\n\ndef rotate(user: str, degrees: int) -> None | Exception:\n    \"\"\" Rotates the image counterclockwise by the specified number of degrees\"\"\"\n    img = Image.open(ut.get_path(user, 'get', 'jpg'))\n    rotated_image = img.rotate(degrees, expand=True, fillcolor=(255, 255, 255))\n    return ut.try_to_save(rotated_image, user, 'jpg')\n\n\ndef black_and_white(user: str) -> None | Exception:\n    im = Image.open(ut.get_path(user, 'get', 'jpg'))\n    pixels = im.load()\n    x, y = im.size\n    for x_i in range(x):\n        for y_i in range(y):\n            r, g, b = pixels[x_i, y_i]\n            average_rgb = int((r + g + b) / 3)\n            if average_rgb < 126:\n                pixels[x_i, y_i] = (0, 0, 0)\n            else:\n                pixels[x_i, y_i] = (255, 255, 255)\n    return ut.try_to_save(im, user, 'jpg')\n\n\ndef gray_scale(user: str) -> None | Exception:\n    im = Image.open(ut.get_path(user, 'get', 'jpg'))\n    pixels = im.load()\n    x, y = im.size\n    for x_i in range(x):\n        for y_i in range(y):\n            r, g, b = pixels[x_i, y_i]\n            average_rgb = int((0.3 * r) + (0.59 * g) + (0.11 * b))\n            pixels[x_i, y_i] = (average_rgb, average_rgb, average_rgb)\n    return ut.try_to_save(im, user, 'jpg')\n\n\ndef check_count_pixels(user: str) -> bool:\n    size = Image.open(ut.get_path(user, 'get', 'jpg')).size\n    return size[0] * size[1] <= 1000 ** 2\n\n\ndef count_unique(image: str, user: str, translate, lang: str) -> list:\n    \"\"\" Counts the pixels, groups them by color, and renders the output\"\"\"\n\n    # Count the pixels and group them\n    with open('data/json/colors.json') as file:\n        color_dict = json.load(file)\n    image = Image.open(image)\n    x, y = image.size\n    q = 0\n    pixels = image.load()\n    unique_pixels = dict()\n    for x_i in range(x):\n        for y_i in range(y):\n            r, g, b = pixels[x_i, y_i]\n            q += 1\n            for key, value in color_dict.items():\n                if value[0] <= r and value[1] <= g and value[2] <= b:\n                    if key in unique_pixels:\n                        unique_pixels[key].append((x_i, y_i))\n                        break\n                    else:\n                        unique_pixels[key] = [(x_i, y_i)]\n                        break\n    sorted_ = (sorted(unique_pixels.items(), key=lambda item: len(item[1]), reverse=True))[:48]\n\n    # Build the image shown to the user\n    pixels_count, pixel_size = 20, 15\n    width = round(len(sorted_) / 16) * 266\n    pixels_pic = Image.new('RGB', (width, 500), 'white')\n    font = ImageFont.truetype('data/fonts/tnr.ttf', size=13)\n    draw = ImageDraw.Draw(pixels_pic)\n    cord_x, cord_y = 30, 20\n    for index, item in enumerate(sorted_):\n        name = item[0]\n        count = len(item[1])\n        rgb_value = tuple(color_dict[name][0:3])\n        draw.text((cord_x - pixel_size - 5, cord_y), text=str(index + 1), font=font, fill='black')\n        draw.rectangle((cord_x, cord_y, cord_x + pixel_size, cord_y + pixel_size), fill=rgb_value)\n        draw.line(((cord_x - 1, cord_y - 1), (cord_x + pixel_size + 1, cord_y - 1),\n                   (cord_x + 1 + pixel_size, cord_y + 1 + pixel_size),\n                   (cord_x - 1, cord_y + 1 + pixel_size), (cord_x - 1, cord_y - 1)), fill='black', width=1)\n        if lang != 'en':\n            draw.text((cord_x + pixel_size * 2, cord_y), text=f'{translate(text=name, dest=lang).text}',\n                      font=font, fill='black')\n        else:\n            
draw.text((cord_x + pixel_size * 2, cord_y), text=f'{name}', font=font, fill='black')\n        draw.text((cord_x + pixel_size * 10, cord_y), text=f'{count} pixels', font=font, fill='black')\n        cord_y += 30\n        if cord_y >= 500:\n            cord_x, cord_y = cord_x + 265, 20\n    ut.try_to_save(pixels_pic, user, 'jpg')\n    return sorted_\n\n\ndef alpha_image(positions: list, user: str) -> None | Exception:\n    \"\"\" Makes the pixels at the coordinates from positions transparent. Saves the result\"\"\"\n    img = Image.open(ut.get_path(user, 'get', 'jpg'))\n    img_new = Image.new('RGBA', img.size, (0, 0, 0, 0))\n    img_new.paste(img)\n    pixels_new = img_new.load()\n    for pos in positions:\n        x, y = pos\n        pixels_new[x, y] = (0, 0, 0, 0)\n    return ut.try_to_save(img_new, user, 'png')\n\n\ndef to_png(user: str) -> None | Exception:\n    \"\"\" Changes the image format to png\"\"\"\n    path = f\"data/photos/get/{user}.jpg\"\n    new_path = path.split('.')[0] + '.png'\n    img = Image.open(path)\n    return ut.try_to_save(img, new_path, 'png')\n\n\ndef change_color(positions: list, to: tuple, user: str) -> None | Exception:\n    \"\"\" Changes all pixels at the coordinates from positions to the value to. Saves the result\"\"\"\n    img = Image.open(ut.get_path(user, 'get', 'jpg'))\n    pixels = img.load()\n    for pos in positions:\n        x, y = pos\n        pixels[x, y] = to\n    return ut.try_to_save(img, user, 'jpg')\n\n\ndef change_filepaths(user: str) -> Exception | list:\n    \"\"\" Swaps the photo to send with the received photo so that work can continue\"\"\"\n    img_send = Image.open(ut.get_path(user, 'to_send', 'jpg'))\n    try:\n        img_send.save(ut.get_path(user, 'get', 'jpg'))\n    except IOError as error:\n        return error\n    return count_unique(ut.get_path(user, 'get', 'jpg'), user)\n\n\ndef resize(user: str, percents: int):\n    \"\"\" Resizes the photo to (percents + 100) percent. 
Saves the result\"\"\"\n    image = cv2.imread(ut.get_path(user, 'get', 'jpg'), cv2.IMREAD_UNCHANGED)\n    if percents in range(-99, 501) and percents != 0:\n        percents += 100\n        width = int(image.shape[1] * percents / 100)\n        height = int(image.shape[0] * percents / 100)\n        dim = (width, height)\n        resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\n        try:\n            cv2.imwrite(ut.get_path(user, 'to_send', 'jpg'), resized)\n        except IOError as error:\n            return error\n        return None\n    else:\n        return 'Я не люблю когда надо мной так шутят'\n","repo_name":"Sheisuka/ImagiroBot","sub_path":"PP.py","file_name":"PP.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"11798608402","text":"import os\nfrom RAiDER.constants import _ZREF, _CUBE_SPACING_IN_M\n\nclass AttributeDict(dict):\n    __getattr__ = dict.__getitem__\n    __setattr__ = dict.__setitem__\n    __delattr__ = dict.__delitem__\n\nDEFAULT_DICT = AttributeDict(\n    dict(\n        look_dir='right',\n        date_start=None,\n        date_end=None,\n        date_step=None,\n        date_list=None,\n        time=None,\n        end_time=None,\n        weather_model=None,\n        lat_file=None,\n        lon_file=None,\n        station_file=None,\n        bounding_box=None,\n        geocoded_file=None,\n        dem=None,\n        use_dem_latlon=False,\n        height_levels=None,\n        height_file_rdr=None,\n        ray_trace=False,\n        zref=_ZREF,\n        cube_spacing_in_m=_CUBE_SPACING_IN_M,\n        los_file=None,\n        los_convention='isce',\n        los_cube=None,\n        orbit_file=None,\n        verbose=True,\n        raster_format='GTiff',\n        file_format='GTiff',\n        download_only=False,\n        output_directory='.',\n        weather_model_directory=None,\n        output_projection='EPSG:4326',\n        interpolate_time='center_time',\n    )\n    )\n","repo_name":"dbekaert/RAiDER","sub_path":"tools/RAiDER/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"81"}
{"seq_id":"14288152375","text":"from django.http import HttpResponse\nfrom django.utils import simplejson\nfrom django.shortcuts import render, get_object_or_404\nfrom projects.models import Project\n\ndef list(request):\n    project_list = Project.objects.all()\n    response = [{'slug': project.slug,\n                 'name': project.name,\n                 'background': project.background,\n                 'description': project.description,\n                 'photos' : [{'url' : f.image.url,\n                              'title' : f.title,\n                              'description' : f.description} for f in project.photos.all()]} for project in project_list]\n    return HttpResponse(simplejson.dumps(response) , mimetype=\"application/json\")\n\ndef project(request, project_name):\n    project = get_object_or_404(Project, slug=project_name)\n    files = project.photos.all()\n\n    response = {'name': project.name,\n                'photos': [{'url': f.image.url,\n                            'title': f.title,\n                            'description': f.description\n                            } for f in files],\n                'url' : project.url,\n                'industry' : project.industry,\n                'background' : project.background,\n                'description': project.description}\n    return HttpResponse(simplejson.dumps(response), mimetype=\"application/json\")\n","repo_name":"Bryan-Turek/portfolio","sub_path":"projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"70626785546","text":"\"\"\"Given a string s consisting of lowercase English letters, return the first letter\r\n to appear twice.\r\n\r\nNote:\r\n\r\nA letter a appears twice before another letter b if the second occurrence of a is 
\r\nbefore the second occurrence of b.\r\ns will contain at least one letter that appears twice.\"\"\"\r\n\r\n\r\nclass Solution:\r\n def repeatedCharacter(self, s):\r\n result = []\r\n for char in s:\r\n if char not in result:\r\n result.append(char)\r\n else:\r\n return char","repo_name":"Johnsonj302/leetcode-problems","sub_path":"2351.py","file_name":"2351.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13956281789","text":"import numpy as np\nimport pandas as pd\nfrom lexos.helpers.error_messages import EMPTY_DTM_MESSAGE\nfrom lexos.models.statistics_model import StatsModel, StatsTestOptions\n\n# ------------------------ First test suite ------------------------\nfrom lexos.receivers.statistics_receiver import StatsFrontEndOption\n\ntest_dtm_one = pd.DataFrame(data=np.array([(40, 20, 15, 5, 0, 0, 0, 0, 0),\n (0, 0, 0, 0, 1, 2, 3, 4, 5)]),\n index=np.array([0, 1]),\n columns=np.array([\"A\", \"B\", \"C\", \"D\", \"E\", \"F\",\n \"G\", \"H\", \"I\"]))\ntest_id_temp_table_one = {0: \"F1.txt\", 1: \"F2.txt\"}\ntest_front_end_option_one = StatsFrontEndOption(active_file_ids=[0, 1],\n sort_ascending=True,\n sort_column=0,\n text_color=\"#000000\",\n highlight_color=\"#000000\")\ntest_option_one = StatsTestOptions(\n token_type_str=\"Tokens\",\n doc_term_matrix=test_dtm_one,\n front_end_option=test_front_end_option_one,\n document_label_map=test_id_temp_table_one)\ntest_stats_model_one = StatsModel(test_options=test_option_one)\ntest_corpus_result_one = test_stats_model_one.get_corpus_stats()\ntest_file_result_one = test_stats_model_one.get_document_statistics()\n# noinspection PyProtectedMember\ntest_box_plot_result_one = test_stats_model_one._get_box_plot_object()\ntest_pandas_one = pd.DataFrame(test_file_result_one[\"statistics-table-body\"])\n# ------------------------------------------------------------------\n\n# ------------------------ Second test suite -----------------------\ntest_dtm_two = pd.DataFrame(\n data=np.array([(40, 20, 15, 5, 0, 0, 0, 0, 0, 0, 0, 0),\n (0, 0, 0, 0, 1, 2, 3, 4, 5, 0, 0, 0),\n (0, 0, 0, 0, 0, 0, 0, 0, 10, 11, 12, 13)]),\n index=np.array([0, 1, 2]),\n columns=np.array([\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\",\n \"I\", \"J\", \"K\", \"L\"]))\ntest_id_temp_table_two = {0: \"F1.txt\", 1: \"F2.txt\", 2: \"F3.txt\"}\ntest_stats_front_end_option_two = \\\n StatsFrontEndOption(active_file_ids=[0, 1, 2],\n sort_ascending=True,\n sort_column=0,\n text_color=\"#000000\",\n highlight_color=\"#000000\")\ntest_option_two = StatsTestOptions(\n token_type_str=\"Characters\",\n doc_term_matrix=test_dtm_two,\n front_end_option=test_stats_front_end_option_two,\n document_label_map=test_id_temp_table_two)\ntest_stats_model_two = StatsModel(test_options=test_option_two)\ntest_corpus_result_two = test_stats_model_two.get_corpus_stats()\ntest_file_result_two = test_stats_model_two.get_document_statistics()\ntest_box_plot_result_two = test_stats_model_two.get_box_plot()\ntest_pandas_two = pd.DataFrame(test_file_result_two[\"statistics-table-body\"])\n# ------------------------------------------------------------------\n\n# ------------------- test suite for anomaly test ------------------\ntest_dtm_anomaly = pd.DataFrame(\n data=np.array([(1, 1), (50, 50), (50, 50), (50, 50), (50, 50),\n (50, 50), (50, 50), (50, 50), (50, 50), (100, 100)]),\n index=np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n columns=np.array([\"A\", \"B\"]))\ntest_id_temp_table_anomaly = \\\n 
{0: \"F1.txt\", 1: \"F2.txt\", 2: \"F3.txt\", 3: \"F4.txt\", 4: \"F5.txt\",\n 5: \"F6.txt\", 6: \"F7.txt\", 7: \"F8.txt\", 8: \"F9.txt\", 9: \"F10.txt\"}\ntest_stats_front_end_option_anomaly = \\\n StatsFrontEndOption(active_file_ids=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n sort_ascending=True,\n sort_column=0,\n text_color=\"#000000\",\n highlight_color=\"#000000\")\ntest_option_anomaly = \\\n StatsTestOptions(token_type_str=\"Characters\",\n doc_term_matrix=test_dtm_anomaly,\n front_end_option=test_stats_front_end_option_anomaly,\n document_label_map=test_id_temp_table_anomaly)\ntest_stats_model_anomaly = StatsModel(test_options=test_option_anomaly)\ntest_corpus_result_anomaly = test_stats_model_anomaly.get_corpus_stats()\ntest_file_result_anomaly = test_stats_model_anomaly.get_document_statistics()\ntest_box_plot_anomaly = test_stats_model_anomaly.get_box_plot()\ntest_pandas_anomaly = pd.DataFrame(\n test_file_result_anomaly[\"statistics-table-body\"])\n\n\n# ------------------------------------------------------------------\nclass TestFileResult:\n def test_basic_info(self):\n assert test_pandas_one[0][0] == \"F1.txt\"\n assert test_pandas_one[0][1] == \"F2.txt\"\n\n def test_hapax(self):\n assert test_pandas_one[1][0] == 0\n assert test_pandas_one[1][1] == 1\n\n def test_total_words(self):\n assert test_pandas_one[2][0] == 80\n assert test_pandas_one[2][1] == 15\n\n def test_average(self):\n assert test_pandas_one[3][0] == 0.05\n assert test_pandas_one[3][1] == 0.333\n\n def test_distinct_words(self):\n assert test_pandas_one[4][0] == 4\n assert test_pandas_one[4][1] == 5\n\n\nclass TestCorpusInfo:\n def test_average(self):\n assert test_corpus_result_one.mean == 47.5\n assert test_corpus_result_two.mean == 47\n\n def test_std(self):\n assert test_corpus_result_one.std_deviation == 45.96\n assert test_corpus_result_two.std_deviation == 32.51\n\n def test_quartiles(self):\n assert test_corpus_result_one.inter_quartile_range == 65\n assert test_corpus_result_two.inter_quartile_range == 48.75\n\n def test_file_anomaly_iqr(self):\n assert test_corpus_result_one.anomaly_iqr.small_items == []\n assert test_corpus_result_one.anomaly_iqr.large_items == []\n assert test_corpus_result_two.anomaly_iqr.small_items == []\n assert test_corpus_result_anomaly.anomaly_iqr.small_items == [\"F1.txt\"]\n assert \\\n test_corpus_result_anomaly.anomaly_iqr.large_items == [\"F10.txt\"]\n\n def test_file_anomaly_std(self):\n assert test_corpus_result_one.anomaly_se.small_items == []\n assert test_corpus_result_two.anomaly_se.large_items == []\n assert test_corpus_result_anomaly.anomaly_se.small_items == [\"F1.txt\"]\n assert test_corpus_result_anomaly.anomaly_se.large_items == [\"F10.txt\"]\n\n def test_file_unit(self):\n assert test_corpus_result_one.unit == \"Tokens\"\n assert test_corpus_result_two.unit == \"Characters\"\n\n\n# -------------------- Empty data frame case test suite ---------------------\ntest_dtm_empty = pd.DataFrame()\ntest_id_temp_table_empty = {}\ntest_stats_front_end_option_empty = StatsFrontEndOption(\n active_file_ids=[],\n sort_ascending=True,\n sort_column=0,\n text_color=\"#000000\",\n highlight_color=\"#000000\")\ntest_option_empty = \\\n StatsTestOptions(token_type_str=\"Tokens\",\n doc_term_matrix=test_dtm_empty,\n front_end_option=test_stats_front_end_option_empty,\n document_label_map=test_id_temp_table_empty)\ntest_stats_model_empty = StatsModel(test_options=test_option_empty)\n\n\nclass TestSpecialCase:\n def test_empty_list(self):\n try:\n _ = 
test_stats_model_empty.get_document_statistics()\n            raise AssertionError(\"Empty input error message did not raise\")\n        except AssertionError as error:\n            assert str(error) == EMPTY_DTM_MESSAGE\n\n        try:\n            _ = test_stats_model_empty.get_corpus_stats()\n            raise AssertionError(\"Empty input error message did not raise\")\n        except AssertionError as error:\n            assert str(error) == EMPTY_DTM_MESSAGE\n\n\n# -------------------- Plotly result test suite -----------------------------\nbasic_fig = test_box_plot_result_one\n\n\nclass TestStatsPlotly:\n    def test_get_stats_scatter(self):\n        assert basic_fig['data'][0]['type'] == 'scatter'\n\n        assert basic_fig['data'][0]['y'][0] == 80\n\n        assert basic_fig['data'][0]['y'][1] == 15\n\n    def test_get_stats_box_plot(self):\n        assert basic_fig['data'][1]['type'] == 'box'\n\n        assert basic_fig['data'][1]['y'][0] == 80\n\n        assert basic_fig['data'][1]['y'][1] == 15\n\n    def test_get_stats_layout(self):\n        assert basic_fig['layout']['xaxis']['showgrid'] is False\n\n        assert basic_fig['layout']['xaxis']['zeroline'] is False\n","repo_name":"WheatonCS/Lexos","sub_path":"test/unit_test/test_stats_model.py","file_name":"test_stats_model.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"81"}
{"seq_id":"17574528073","text":"responses0 = {\n    0: [\"cat\", \"lion\", \"flower\", \"wolf\", \"desert\"],\n    1: [\"rain\", \"water\", \"drop\", \"faucet\", \"sad\"],\n    2: [\"heart\", \"love\", \"sun\", \"ball\", \"daytime\"],\n}\n\nresponses1 = {\n    0: [\"flower\", \"daisy\", \"horse\", \"feline\", \"wild\"],\n    1: [\"rainy\", \"wet\", \"cold\", \"shower\", \"spring\"],\n    2: [\"red\", \"lonely\", \"alone\", \"sad\", \"family\"],\n}\n\nresponses2 = {\n    0: [\"backpack\", \"pencil\", \"school\", \"yellow\", \"cat\"],\n    1: [\"beach\", \"ocean\", \"sand\", \"wave\", \"sun\"],\n    2: [\"fun\", \"soccer\", \"game\", \"run\", \"team\"],\n}\n\nresponses_history = [(0, responses0), (1, responses1), (2, responses2)]","repo_name":"Hack-Brown2021/rorschach-friending","sub_path":"flask-app/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"24717578366","text":"# Structure Guided Refiner\n\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom diffusers.configuration_utils import ConfigMixin, register_to_config\nfrom diffusers.models.modeling_utils import ModelMixin\n\nclass SGREncoderConv(nn.Module):\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        kernel_size: int = 4,\n        stride: int = 2,\n        padding: int = 1\n    ):\n        super().__init__()\n        self.layers = nn.Sequential(\n            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding),\n            nn.ReLU()\n        )\n\n    def forward(self, x: Tensor) -> Tensor:\n        out = self.layers(x)\n        return out\n\nclass SGREmbedding(nn.Module):\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        num_layers: int = 4,\n        kernel_size: int = 4,\n        stride: int = 2,\n        padding: int = 1\n    ):\n        super().__init__()\n        self.embed = nn.Sequential(\n            SGREncoderConv(in_channels, out_channels, kernel_size, stride=stride, padding=padding),\n            *[\n                SGREncoderConv(\n                    out_channels,\n                    out_channels,\n                    kernel_size,\n                    stride=stride,\n                    padding=padding\n                ) for _ in range(num_layers - 2)\n            ],\n            SGREncoderConv(out_channels, out_channels, 3, stride=1, padding=1),\n        )\n    \n    def forward(self, x: Tensor) -> Tensor:\n        out 
= self.embed(x)\n        return out\n\nclass SGREncoder(ModelMixin, ConfigMixin):\n    @register_to_config\n    def __init__(\n        self,\n        in_channels: int = 3,\n        out_channels: int = 4,\n        num_conditions: int = 3,\n        num_layers: int = 4,\n        kernel_size: int = 4,\n        stride: int = 2,\n        padding: int = 1\n    ):\n        super().__init__()\n        self.num_conditions = num_conditions\n        self.embeds = nn.ModuleList(\n            [\n                SGREmbedding(\n                    in_channels,\n                    out_channels,\n                    num_layers=num_layers,\n                    kernel_size=kernel_size,\n                    stride=stride,\n                    padding=padding\n                ) for _ in range(num_conditions)\n            ]\n        )\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"\n        Parameters:\n            x (`torch.Tensor`)\n                A tensor with the following shape `(num_branches, batch, channel, height, width)`\n        \"\"\"\n        if x.shape[0] != self.num_conditions:\n            raise ValueError(f\"number of input conditions must be {self.num_conditions}, but got {x.shape[0]}\")\n        encoded = torch.stack([embed(x) for x, embed in zip(x, self.embeds)])\n        return encoded.sum(dim=0)\n","repo_name":"MasahideOkada/HyperHuman","sub_path":"hyper_human/models/sgr.py","file_name":"sgr.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
{"seq_id":"30437752987","text":"import os\n\nfirst_grades = [\"cnn\", \"cnn_no_bn\", \"mlp\", \"mlp_no_bn\"]\nsecond_grades = [\"model_1\", \"model_2\", \"model_3\", \"model_4\", \"model_5\"]\n\nfor first_grade in first_grades:\n    for second_grade in second_grades:\n        acc_dir = os.path.join(os.path.join(first_grade, \"result\"), os.path.join(second_grade, \"acc.png\"))\n        loss_dir = os.path.join(os.path.join(first_grade, \"result\"), os.path.join(second_grade, \"loss.png\"))\n        target_acc_dir = os.path.join(\"images\", first_grade+'_'+second_grade+'_'+'acc.png')\n        target_loss_dir = os.path.join(\"images\", first_grade+ '_' + second_grade + '_' + 'loss.png')\n        print(acc_dir, loss_dir)\n        os.system('cp ' + acc_dir + ' ' + target_acc_dir)\n        os.system('cp ' + loss_dir + ' ' + target_loss_dir)\n","repo_name":"zhaoxh16/Artificial-Neural-Network-Homework-2","sub_path":"get_images.py","file_name":"get_images.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"6896608126","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.app_context().push()\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///new-books-collection.db'\ndb = SQLAlchemy()\ndb.init_app(app)\n\n\nclass Book(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    title = db.Column(db.String(250), unique=True, nullable=False)\n    author = db.Column(db.String(250), nullable=False)\n    rating = db.Column(db.Float(), nullable=False)\n\n    def __repr__(self):\n        return f\"<Book {self.title}>\"\n\n\ndb.create_all() # create database file and table\n\n# create a new record\nbook = Book(id=1, title='Harry Potter', author='J. K. Rowling', rating=9.3)\n\n# Alternative:\n# book = Book()\n# book.id = 1\n# book.title = 'Harry Potter'\n# book.author = 'J. K. 
Rowling'\n# book.rating = 9.3\n\ndb.session.add(book)\ndb.session.commit()\n\n# read all records (db.session.query is legacy)\nall_books = db.session.execute(db.select(Book).order_by(Book.author)).scalars().all()\nprint(all_books)\n\n# read a particular record\nparticular_book = db.first_or_404(db.select(Book).filter_by(title='Harry Potter'))\nprint(particular_book)\n\n# update a particular record\nparticular_book.title = 'Harry Potter and the Chamber of Secrets'\ndb.session.commit()\nprint(particular_book) # checked the database too\n\n# update a record by primary key\nbook_id = 1\nbook_to_update = db.one_or_404(db.select(Book).filter_by(id=book_id))\nbook_to_update.title = 'Harry Potter and the Goblet of Fire'\ndb.session.commit()\nprint(book_to_update)\n\n# delete a particular record by primary key\nbook_id = 1\nbook_to_delete = db.one_or_404(db.select(Book).filter_by(id=book_id))\ndb.session.delete(book_to_delete)\ndb.session.commit() # checked the database too\n\n# import sqlite3\n#\n# db = sqlite3.connect('books-collection.db')\n#\n# cursor = db.cursor()\n# create_table_sql = \"\"\"\n# CREATE TABLE books (\n# id INTEGER PRIMARY KEY,\n# title varchar(250) NOT NULL UNIQUE,\n# author varchar(250) NOT NULL,\n# rating FLOAT NOT NULL\n# )\n# \"\"\"\n# cursor.execute(create_table_sql)\n#\n# add_book_sql = \"\"\"\n# INSERT INTO books\n# VALUES (1, 'Tale of Two Cities', 'Charles Dickens', 9.3)\n# \"\"\"\n#\n# cursor.execute(add_book_sql)\n# db.commit()\n","repo_name":"AlexGose/100-days-of-python","sub_path":"day63-book-review-site/SQLite/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"16574846058","text":"from setuptools import setup, find_packages\n\n\nwith open(\"README.md\") as readme_file:\n    readme = readme_file.read()\n\nsetup(\n    author=\"Simon Garisch\",\n    author_email=\"gatman946@gmail.com\",\n    description=\"A typing tutor built with PyQt.\",\n    install_requires=[\"PyQt5==5.10.1\"],\n    license=\"MIT license\",\n    long_description=readme,\n    include_package_data=True,\n    keywords=\"pytypist\",\n    name=\"pytypist\",\n    packages=find_packages(include=[\"pytypist\"]),\n    test_suite=\"tests\",\n    url=\"https://github.com/simongarisch/pytypist\",\n    version=\"0.1.0\",\n)","repo_name":"simongarisch/pytypist","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"22423505927","text":"import math\nfrom Vk import VK\nimport sqlite3 as sql\nfrom tqdm import tqdm\nfrom DataBase import DataBase\n\ntoken = input(\"token : \")\ndomain = input(\"domain : \")\nname_table = input(\"Name for the new table: \")\nvk = VK()\nconnection = sql.connect(\"posts.db\")\ndbase = DataBase(connection)\n\nif __name__ == \"__main__\":\n    dbase.newTable(name_table)\n    postsCount = vk.getCount(domain, token)\n    print(\"Count posts: \", postsCount)\n    count = math.ceil(postsCount / 100)\n    k = 100\n\n    with tqdm(total=count) as ProgressBar:\n        for i in range(0, postsCount, k):\n            News = vk.wallGet(domain,token, i)\n            l = len(News[\"response\"][\"items\"])\n            if l is None or l < 1:\n                input(\"here\")\n            for n in range(0, l, 1):\n                dbase.addNews(News[\"response\"][\"items\"][n], name_table)\n            
ProgressBar.update()","repo_name":"khudy01/parserVK","sub_path":"parser_VK.py","file_name":"parser_VK.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23591799920","text":"# p227_starter_one_button_shell.py\r\n# Note this will not run in the code editor and must be downloaded\r\n\r\nimport subprocess\r\nimport tkinter as tk\r\nimport tkinter.scrolledtext as tksc\r\nfrom tkinter import filedialog\r\nfrom tkinter.filedialog import asksaveasfilename\r\n\r\n\r\ndef mSave():\r\n filename = asksaveasfilename(defaultextension='.txt',filetypes = (('Text files', '*.txt'),('Python files', '*.py *.pyw'),('All files', '*.*')))\r\n if filename is None:\r\n return\r\n file = open (filename, mode = 'w')\r\n text_to_save = command_textbox.get(\"1.0\", tk.END)\r\n \r\n file.write(text_to_save)\r\n file.close()\r\n\r\n\r\ndef do_command(command):\r\n global command_textbox, url_entry\r\n # If url_entry is blank, use localhost IP address \r\n url_val = url_entry.get()\r\n if (len(url_val) == 0):\r\n # url_val = \"127.0.0.1\"\r\n url_val = \"192.168.1.222\"\r\n \r\n command_textbox.delete(1.0, tk.END)\r\n command_textbox.insert(tk.END, command + \" working....\\n\")\r\n command_textbox.update()\r\n\r\n p = subprocess.Popen(command + url_val, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #v2\r\n\r\n cmd_results, cmd_errors = p.communicate()\r\n command_textbox.insert(tk.END, cmd_results)\r\n command_textbox.insert(tk.END, cmd_errors)\r\n\r\nroot = tk.Tk()\r\nframe = tk.Frame(root,bg='antique white')\r\nframe.pack(fill='x')\r\n\r\n# set up button to run the do_command function\r\nsave_btn = tk.Button(frame,\r\n text=\"save\",\r\n fg= 'blue',\r\n bg= 'linen',\r\n command=mSave)\r\nsave_btn.pack(side='right')\r\n\r\nping_btn = tk.Button(frame, \r\ntext=\"ping\", \r\nfg='blue',\r\nbg='linen',\r\ncommand=lambda:do_command(\"ping \"))\r\nping_btn.pack(side='left')\r\n\r\ntracert_btn = tk.Button(frame,\r\ntext=\"tracert\", \r\nfg='blue',\r\nbg='linen',\r\ncommand=lambda:do_command(\"tracert \"))\r\ntracert_btn.pack(side='left')\r\n\r\nnslookup_btn = tk.Button(frame,\r\ntext=\"nslookup\",\r\nfg='blue',\r\nbg='linen',\r\ncommand=lambda:do_command(\"nslookup \"))\r\nnslookup_btn.pack(side='left')\r\n\r\n# creates the frame with label for the text box\r\nframe_URL = tk.Frame(root,bg=\"antique white\") # change frame color\r\nframe_URL.pack(side='left',fill='both')\r\n\r\n# decorative label\r\nurl_label = tk.Label(frame_URL, text=\"Enter a URL of interest: \", \r\n font=(\"Arial\", 14),\r\n fg=\"blue\",\r\n bg=\"white\")\r\nurl_label.pack(side=tk.LEFT)\r\nurl_entry= tk.Entry(frame_URL, font=(\"comic sans\", 14)) # change font\r\nurl_entry.pack(side=tk.LEFT)\r\n\r\nframe = tk.Frame(root) # change frame color\r\nframe.pack(fill='both')\r\n\r\ncommand_textbox = tksc.ScrolledText(frame, height=10, width=100)\r\ncommand_textbox.pack(fill='both')\r\n\r\nroot.mainloop()","repo_name":"Joseph-2/2_2_7_project","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"716271087","text":"from odoo import api, fields, models, _, tools\nfrom datetime import datetime, timedelta\nfrom odoo.exceptions import ValidationError\n\n\nclass TravelPackage(models.Model):\n _name = \"travel.package\"\n _description = \"Tour packages\"\n\n responsible_id = fields.Many2one('res.partner', string='customer')\n 
Quotationdate = fields.Date(string='Quotation Date')\n    vehicletypes = fields.Selection([\n        ('bus', 'bus'),\n        ('traveller', 'traveller'),\n        ('van', 'van'),\n        ('other', 'other')\n    ], required=True, default='bus')\n    # vehicletypes=fields.Many2one(comodel_name=\"travel.service\",string=\"vehicle types\")\n    name = fields.Char(string='name')\n    numberofseats = fields.Integer(string='Number of seats',\n                                   default='1')\n    date = fields.Date('Date', default=fields.Datetime.today())\n\n    sourcelocation = fields.Selection([\n        ('thrissur', 'thrissur'),\n        ('kozikkod', 'kozikkod'),\n        ('palakkad', 'palakkad'),\n        ('malappuram', 'malappuram')], string='Source location')\n\n    destinationlocation = fields.Selection([\n        ('banglore', 'Banglore'),\n        ('chennai', 'Chennai'),\n        ('coimbatore', 'Coimbatore'),\n        ('munnar', 'Munnar')], string='Destination location')\n\n    startdate = fields.Date('Start Date', default=fields.Datetime.today())\n    endtdate = fields.Date('End Date')\n    ntravelers = fields.Integer('Number Of Travelers')\n    facilities = fields.Many2one(comodel_name=\"travel.fecilities\",\n                                 string='facilities')\n    vehicle = fields.Many2one(comodel_name=\"travel.vehicle\",\n                              string='vehicle')\n    state = fields.Selection([('draft', 'Draft'), ('confirm', 'confirm')],\n                             default=\"draft\", string=\"status\")\n    estimatedkm = fields.Float(string='EstimatedKM')\n    warning = fields.Boolean(default=False)\n\n    estimation_ids = fields.Many2many('travel.vehiclecharges.lines',\n                                      'vehicle_id', string='Estimation')\n    travel_ids = fields.One2many('travel.estimation',\n                                 'service_id', string='travel charges')\n    def package_request(self):\n        self.state = 'confirm'\n        if self.vehicle.state != \"available\":\n            raise ValidationError(_(\"Please enter all guest details\"))\n\n        else:\n            self.vehicle.state = 'notavailable'\n            vals = {\n                'responsible_id': self.responsible_id.id,\n            }\n            travel_rec = self.env['travel.customer'].create(vals)\n            return {\n                'type': 'ir.actions.act_window',\n                'name': 'Travel',\n                'view_mode': 'form',\n                'res_model': 'travel.customer',\n                'res_id': travel_rec.id,\n            }\n\n    def return_request(self):\n        #self.state = 'return'\n        self.vehicle.state = 'available'\n\n    @api.onchange('vehicletypes')\n    def filter_vehicle(self):\n        print('rsss')\n        if self.vehicletypes:\n            print(self.vehicletypes)\n            return {\n                'domain': {'vehicle': [('vehicletypes', '=',\n                                        self.vehicletypes)]}}\n\n    # def action_add_product(self, line=line):\n    #\n    #     line = self.env['travel.customer'].create(\n    #         {\n    #             'vehiclecharges': self.vehicletypes,\n    #             'service_id': self.travel_id\n    #         }\n    #     )\n\n\nclass TravelEstimation(models.Model):\n    _name = \"travel.estimation\"\n    _description = \"Travel Estimation\"\n    service_id = fields.Many2one(comodel_name='travel.service',\n                                 string='Service')\n    vehiclecharges = fields.Integer(string=\"Amount\")\n    travel_id = fields.Many2one('travel.package',string=\"travel\")\n# # vehiclecharges = fields.Integer(string=\"Amound\", name=\"vehicle_charges\")\n# # estimatedkm = fields.Float(string=\"EstimatedKM\")\n# #\n# # estimation_id = fields.Many2one('travel.package',string=\"estimation\")\n# #\n","repo_name":"Arunp771/MyProject","sub_path":"models/tourpackage.py","file_name":"tourpackage.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"21794901264","text":"if __name__ == '__main__':\n    s = input()\n    alnumflag = False\n    alphaflag = False\n    digitflag = False\n    lowerflag = False\n    upperflag = False\n\n    for item in 
s:\n        if item.isalnum():\n            alnumflag = True\n\n        if item.isalpha():\n            alphaflag = True\n\n        if item.isdigit():\n            digitflag = True\n\n        if item.islower():\n            lowerflag = True\n\n        if item.isupper():\n            upperflag = True\n\n    print(alnumflag)\n    print(alphaflag)\n    print(digitflag)\n    print(lowerflag)\n    print(upperflag)\n","repo_name":"akshayjain3450/HackerRank","sub_path":"Python Language/stringval.py","file_name":"stringval.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"19484046088","text":"# -*- coding: utf-8 -*-\n# @Author : caiweichao\n# @explain : optimized logging class\nimport logging\nimport os\n\nfrom Commons import log_process\nfrom ConfigFile.contants_file import *\nfrom ConfigFile.contants_test import *\n\n# Log collector\nlogger = logging.getLogger(\"Log\")\n# Set the output level\nlogger.setLevel(LOG_LEVEL)\n\n\ndef set_handler(levels):\n    if levels == 'error':  # if the level is error, add the error handler\n        logger.addHandler(Log.error_handle)\n    else:  # otherwise add the info handler\n        logger.addHandler(Log.handler)\n    logger.addHandler(Log.ch)  # everything is also printed to the console\n\n\ndef remove_handler(levels):\n    if levels == 'error':\n        logger.removeHandler(Log.error_handle)\n    else:\n        logger.removeHandler(Log.handler)\n    logger.removeHandler(Log.ch)\n\n\nclass Log:\n    __obj = None\n\n    @staticmethod\n    def __new__(cls, *args, **kwargs):\n        if not cls.__obj:\n            cls.__obj = super().__new__(cls)\n            return cls.__obj\n        else:\n            return cls.__obj\n\n    # Instantiate the file-management class\n    log_process = log_process.LogProcess()\n    # Create the log directory; the argument selects the text log\n    log_dir = log_process.get_log_dir()\n    # Specify the output file\n    log_file = os.path.join(log_dir[0], 'logs.log')\n    # Set the log output format\n    formatter = logging.Formatter(fmt=FORMATTER)\n    # Specify the output channels\n    # Console output\n    ch = logging.StreamHandler()\n    ch.setLevel(LOG_LEVEL_CONSILE)\n    ch.setFormatter(formatter)\n    # INFO log output\n    handler = logging.FileHandler(filename=log_file, encoding='utf-8')\n    handler.setLevel('DEBUG')\n    handler.setFormatter(formatter)\n    # Error log output\n    error_handle = logging.FileHandler(filename=log_file, encoding='utf-8')\n    error_handle.setLevel('ERROR')\n    error_handle.setFormatter(formatter)\n\n    @staticmethod\n    def debug(msg):\n        set_handler('debug')\n        logger.debug(msg)\n        remove_handler('debug')\n\n    @staticmethod\n    def info(msg):\n        set_handler('info')\n        logger.info(msg)\n        remove_handler('info')\n\n    @staticmethod\n    def error(msg):\n        set_handler('error')\n        # also include the exception info\n        logger.error(msg, exc_info=True)\n        remove_handler('error')\n","repo_name":"caiweichao/auto-test","sub_path":"Commons/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"14995565981","text":"from pathlib import Path\nimport subprocess\nimport argparse\n\nPLUGINS = 'plugins'\n\ndef start_analysis(command: list):\n    try:\n        subprocess.run(args=command, stderr=subprocess.STDOUT, cwd=str(Path.cwd()))\n    except subprocess.CalledProcessError as err:\n        print('Status : FAIL', err.returncode)\n\n\ndef build_command(ghidra: Path, import_: Path) -> list:\n    ghidra = ghidra / 'support' / 'analyzeHeadless'\n    project_root = Path.cwd()\n    tmp = project_root / 'tmp'\n    if not tmp.is_dir():\n        tmp.mkdir()\n    command = [str(ghidra), str(tmp), 'PcodeExtractor', '-import', str(import_), '-postScript', 'PcodeExtractor.java', str(tmp / (import_.name + '.json')), '-scriptPath', str(project_root), '-deleteProject']\n\n    return command\n\n\ndef plugin_folder_exists(path: Path):\n    plugin_path = path / PLUGINS\n    if not plugin_path.is_dir():\n        
plugin_path.mkdir()\n\ndef is_in_classpath(location: Path, filename: str) -> bool:\n plugin_path = location / PLUGINS\n if list(plugin_path.glob('gson*.jar')):\n return True\n return False\n\n\ndef is_directory(parser: argparse.ArgumentParser, path: str) -> Path:\n dir = Path(path)\n if dir.is_dir() and 'ghidra' in path:\n return dir\n parser.error(f'Given Ghidra path {path} is not valid.')\n\n\ndef handle_gson(parser: argparse.ArgumentParser, path: str) -> Path:\n file = Path(path)\n if file.is_file():\n if 'gson' in path and file.suffix == '.jar':\n return file\n parser.error(f'Given file {path} is not a jar file or contains the name gson.')\n parser.error(f'Gson library could not be found at {path}.')\n\n\ndef is_file(parser: argparse.ArgumentParser, path: str) -> Path:\n file = Path(path)\n if file.is_file():\n return file\n parser.error(f'Binary could not be found at {path}.')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-n', '--gson', dest='gson', help='Path to Gson library required in Ghidra\\'s classpath. Has to be set with the plugin parameter.',\n metavar='FILE', type=lambda f: handle_gson(parser, f))\n\n parser.add_argument('-g', '--ghidra', required=True, dest='ghidra', help='Path to Ghidra. Ends in .../ghidra_9.X.X_PUBLIC/.',\n metavar='PATH', type=lambda d: is_directory(parser, d))\n\n parser.add_argument('-p', '--plugin', dest=\"plugin\", help='Path to ghidra\\'s plugins directory where gson.jar should be placed. Ends in .../.ghidra/.ghidra_9.X.X_PUBLIC/. Has to be set with the gson parameter',\n metavar='PATH', type=lambda p: is_directory(parser, p))\n\n parser.add_argument('-i', '--import', required=True, dest='import_', help='Path to binary which is to be analysed by Ghidra.',\n metavar='FILE', type=lambda f: is_file(parser, f))\n\n args = parser.parse_args()\n\n # If a gson library is specified, the path to .../.ghidra/.ghidra_9.X.X_PUBLIC/ also needs to be specified\n if args.gson and not args.plugin or not args.gson and args.plugin:\n parser.error('--gson and --plugin have to be set together.')\n\n # check whether the plugins folder exist in .../.ghidra/.ghidra_9.X.X_PUBLIC/. 
If not, create it.\n if args.plugin:\n plugin_folder_exists(args.plugin)\n # check whether there already is a gson file in .../.ghidra/.ghidra_9.X.X_PUBLIC/plugins/\n if is_in_classpath(args.plugin, args.gson.name):\n print(f'\\nGson lib {args.gson} already in Ghidra classpath.\\n')\n else:\n # move the gson file from the specfied location to .../.ghidra/.ghidra_9.X.X_PUBLIC/plugins/\n args.gson.replace(args.plugin / PLUGINS / args.gson.name)\n\n return args\n\n\ndef main():\n args = parse_args()\n command = build_command(args.ghidra, args.import_)\n start_analysis(command=command)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mellowCS/p_code_extractor","sub_path":"start_analysis.py","file_name":"start_analysis.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"39111060807","text":"#coding: utf-8\nfrom socket import * \nfrom threading import Thread\nimport time, pickle\nfrom classes import *\n\nip_server = '192.168.0.9' # IP of server to connect\nserverPort = 12000 # port to connect\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect((ip_server,serverPort))\n\ndef getMessage():\n\twhile 1:\n\t\tmessage = clientSocket.recv(1024)\n\t\ts = '\\n------ Mensagem do servidor ------\\n' + str(pickle.loads(message))\n\t\tprint(s + '\\n----------------------------------\\n')\n\t\ttime.sleep(1)\n\n\n#start here!\nprint ('Client started!\\n')\nThread(target=getMessage, args=()).start()\ntime.sleep(1)\nwhile 1:\n\tcmd = raw_input('Input the command: ')\n\tnick = raw_input('Input the nick: ')\n\tmsg = raw_input('Input the msg: ')\n\tmsg = Message(clientSocket.getsockname(),(ip_server,serverPort),nick,cmd,msg)\n\tclientSocket.send(pickle.dumps(msg))\n\t","repo_name":"higor21/WhatsApp-Project","sub_path":"Project - Python/copy/clientD_cp.py","file_name":"clientD_cp.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28360680335","text":"''' telegram bot blank '''\nfrom typing import List, Dict, Callable\nfrom functools import partial\nfrom bot_former import bot\nfrom bot_former import thread\nfrom bot_former import talk\nfrom bot_former import multitalk\nfrom bot_former import user\nfrom bot_former import let_me_in\nfrom bot_former import permissions_handler as perms\nfrom bot_former import log\n\nlogger = log.get_logger(__name__, console_level=10, file_level=10)\n\nclass Handler(bot.Bot):\n ''' bot description '''\n def __init__(self, actions: Dict[str, List[Callable]], owner: List[int], threader : bool = False) -> None:\n self.actions = {action.action_id : action\n for key_word in actions\n for action in actions[key_word]}\n super().__init__(perms.actions_to_inline(actions))\n self.thread = thread.ThreadObj(threader)\n self.owner = owner\n\n def __sub_functions__(self, action : Callable, curr_talk : object) -> None:\n ''' wrap report object with bot functions '''\n bot_funcs = {'print' : partial(self.user_output, this_talk = curr_talk),\n 'input' : partial(self.user_input, this_talk = curr_talk),\n 'after' : partial(self.clean_after, this_talk = curr_talk),\n 'print_img' : partial(self.img_output, this_talk = curr_talk)}\n for function in bot_funcs:\n if hasattr(action, function):\n setattr(action, function, bot_funcs[function])\n\n def __new_user_confirm__(self, message: Dict[str, any]) -> None:\n ''' ask owner if new user shoud be added '''\n logger.debug('Add user 
request: %s', message['from'])\n mtalk = multitalk.Multitalk(message['chat']['id'],\n message['message_id'],\n 'join',\n message[\"from\"]\n )\n for admin in self.owner:\n msg_to_answer = self.__send_message_with_inline__(chat_id= admin,\n reply_to_message_id= None,\n text= f'Добавить нового пользователя {message[\"from\"][\"username\"]} ({message[\"from\"][\"first_name\"]} {message[\"from\"][\"last_name\"]})?',\n markup= perms.admins_inline(mtalk.mtalk_id)['/join'])\n mtalk.update(msg_history=[admin, msg_to_answer])\n\n def __manage_message__(self, message: dict) -> None:\n ''' work with responses'''\n this_user = user.find_user_by_id(message['from']['id'])\n if this_user is None and message['text'] == '/join':\n self.__new_user_confirm__(message)\n elif this_user is None:\n pass\n else:\n user_keyboards = perms.get_user_inline(this_user, self.inline_keyboards)\n if message['text'].lower() in user_keyboards:\n cur_talk = talk.Talk(this_user.user_id, message['chat']['id'], message['message_id'])\n cur_talk.new_msg(self.__send_message_with_inline__(\n **cur_talk.ids(),\n text = 'Что делать?',\n markup = user_keyboards[message['text'].lower()]))\n\n def __manage_callback_query__(self, callback: dict) -> None:\n ''' work with responses'''\n if callback['data'][:2]=='mt':\n self.__manage_mtalk__(callback)\n else:\n self.__manage_talk__(callback)\n\n def __manage_talk__(self, callback: dict) -> None:\n ''' work with responses'''\n curr_talk = talk.find_talk(callback['message']['chat']['id'],\n callback['from']['id'],\n callback['message']['reply_to_message']['message_id'])\n if curr_talk is None:\n logger.debug('Talk by parameters not found: chat_id %s, user_id %s, message_id %s',\n callback['message']['chat']['id'],\n callback['from']['id'],\n callback['message']['reply_to_message']['message_id'])\n return\n\n current_action = self.actions[int(callback['data'])]\n self.__sub_functions__(current_action, curr_talk)\n try:\n self.thread.thread_start(current_action)\n curr_talk.add_task_info(current_action.result_string)\n except Exception as ex:\n self.user_output(f'Exception occured {str(ex)}', curr_talk)\n\n def __manage_mtalk__(self, callback: dict) -> None:\n ''' work with responses'''\n mark, mtalk_id, task_id = callback['data'].split('_')\n curr_mtalk = multitalk.find_mtalk(int(mtalk_id))\n if curr_mtalk.closed:\n self.__send_message__(text = 'This task closed already with stat: {curr_mtalk.stat}',\n chat_id = callback['message']['chat']['id'])\n self.__delete_message__(chat_id= callback['message']['chat']['id'],\n message_id= callback['message']['id'])\n else:\n result = perms.admin_actions(int(task_id))(curr_mtalk.args)\n curr_mtalk.update(result, True)\n for chat, msg in curr_mtalk.msg_history:\n self.__send_message__(text = result,\n chat_id = chat)\n self.__delete_message__(chat_id = chat,\n message_id = msg)\n\n def user_output(self, text: str, this_talk: object) -> None:\n ''' send message to user '''\n _msg = self.__send_message__(text, **this_talk.ids())\n this_talk.new_msg(_msg)\n\n def user_input(self, this_talk: object) -> str:\n ''' wait for user answer '''\n self.thread.acquire()\n while True:\n try:\n responses = self.__get_updates__()\n for response in responses:\n if 'message' in response:\n self.thread.release()\n this_talk.new_msg(response['message']['message_id'])\n return response['message']['text']\n self.__manage_response__(response)\n except KeyError:# no key ['result'] on empty responce\n pass\n\n def img_output(self, img_path: str, this_talk: object) -> None:\n ''' 
send image to user '''\n        _msg = self.__send_img__(img_path, **this_talk.ids())\n        this_talk.new_msg(_msg)\n\n    def clean_after(self, this_talk : object) -> None:\n        ''' clean up task '''\n        for _msg in this_talk.msgs[:-1]:\n            self.__delete_message__(chat_id = this_talk.chat_id,\n                                    message_id = _msg)","repo_name":"aneknana/Bot_former","sub_path":"bot_former/action_handler.py","file_name":"action_handler.py","file_ext":"py","file_size_in_byte":6829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"18083849976","text":"#-*- coding:utf-8 -*-\nimport requests\nimport bs4\nimport json\nimport re\nimport sqlite3\nimport os\nimport threading\n#######\n# Xici proxy site\nheaders = {\n'User-Agent':'\"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20\",',\n}\n\nhp_url ='http://www.xicidaili.com/wt'\ndef fetch_xici():\n    url = hp_url\n    page_content = requests.get(url,headers=headers)\n    str_content = page_content.text\n    # print(str_content)\n    soup = bs4.BeautifulSoup(str_content,'lxml')\n    tr_list = soup.find_all('tr',attrs={'class':['odd','']})\n    ip_list = []\n    ip_rule = re.compile(r'(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})')\n    port_rule = re.compile(r'\>(\d+)\<')\n    for tr in tr_list:\n        str_tr = str(tr)\n        # print(str_tr)\n        re_m = re.search(r'HTTP',str_tr)\n        # print(re_m)\n        if re_m:\n            dic1 = {}\n            ip = re.findall(ip_rule,str_tr)[0]\n            port = re.findall(port_rule,str_tr)[0]\n            dic1[\"http\"] = \"http://\" + ip + \":\" + port\n            print(dic1)\n            if verify_ip(dic1):\n                print('usable')\n                insertdata(dic1)\n                ip_list.append(dic1)\n    print('xici',ip_list)\n    # return ip_list\n\n\n\n\n# HTTPS proxies\n# hps_url = 'http://www.xicidaili.com/wn'\n\n# HTTP proxies\n\n\n# def xici_main():\n    # crawl https proxy IPs\n    # https_list = fetch_ip(hps_url)\n    # crawl http proxies\n    # http_list = fetch_ip(hp_url)\n    # alldata = http_list + https_list\n    # return alldata\n# xici_main()\n\n# =================================================================================\n# Youdaili site\nu_url = 'http://www.youdaili.net/Daili/http/29487.html'\ndef fetch_udaili():\n    url = u_url\n    page_content = requests.get(url, headers=headers)\n    str_content = page_content.text\n    # print(str_content)\n    # print(str_content)\n    soup = bs4.BeautifulSoup(str_content, 'lxml')\n    # script_tag = soup.find('script',text='@HTTP')\n    p_tags = soup.find_all('p')\n    rule = re.compile(r'(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}:\d+)')\n    alldata = []\n    for p in p_tags:\n        try:\n            ip = re.findall(rule,str(p))[0]\n            print(ip)\n            dic = {}\n            if ip:\n                dic[\"http\"] = \"http://\"+ip\n                if verify_ip(dic):\n                    print('passed')\n                    insertdata(dic)\n                # alldata.append(dic)\n        except:\n            pass\n    print('youdaili',alldata)\n    # return alldata\n\n# def udaili_main():\n#     fetch_udaili(u_url)\n\n# =========================================================================\n# 66ip proxies\nss_url ='http://www.66ip.cn/mo.php?tqsl=50'\nheaders1 = {\n'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n'Accept-Encoding':'gzip, deflate, sdch',\n'Accept-Language':'zh-CN,zh;q=0.8,zh-TW;q=0.6',\n'Connection':'keep-alive',\n'Cookie':'__cfduid=dc82e63a299dce97b98b94d949f5a9bb61484641816; CNZZDATA1253901093=1728273565-1484639487-http%253A%252F%252Fwww.baidu.com%252F%7C1484701785; Hm_lvt_1761fabf3c988e7f04bec51acd4073f4=1484646251,1484646378,1484702884,1484703157; 
Hm_lpvt_1761fabf3c988e7f04bec51acd4073f4=1484704429',\n'Host':'www.66ip.cn',\n'Referer':'http://www.66ip.cn/pt.html',\n'Upgrade-Insecure-Requests':'1',\n'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'\n}\ndef fetch_ss():\n    # headers['Cookie'] = cookie\n    # headers['Referer'] = refer\n    url = ss_url\n    page_content = requests.get(url, headers=headers1)\n    # print(page_content)\n    str_content = page_content.text\n    rule = re.compile(r'(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}:\d+)')\n    result = re.findall(rule,str_content)\n    # print(result)\n    alldata = []\n    for ip in result:\n        dic = {}\n        dic[\"http\"] = \"http://\"+ip\n        if verify_ip(dic):\n            print('passed')\n            insertdata(dic)\n        # alldata.append(dic)\n\n    print('66ip',alldata)\n    # return alldata\n\n# fetch_ss(ss_url)\n\n# First check whether a proxy IP works\ndef verify_ip(dic):\n    proxies = dic\n    fixed_url = 'http://www.baidu.com/'\n    try:\n        res = requests.get(fixed_url,proxies=proxies,timeout=2)\n        # print(res.text)\n        if 'STATUS OK' in res.text:\n            return 1\n        else:\n            return\n    except:\n        return\n\n\n######################################################\n# Create or connect to the database\ndef nsqlite():\n    DATABASE = 'ip_list.db'\n    created = os.path.exists(DATABASE)\n    conn = sqlite3.connect(DATABASE)\n    if not created:\n        conn.execute('''\n        CREATE TABLE IPLIST\n        (\n        ID INTEGER PRIMARY KEY,\n        IP CHAR(30) NOT NULL\n        );\n        ''')\n    return conn\n\n# Check for duplicates\ndef search(db,ip):\n    sql = r'SELECT * FROM IPLIST WHERE IP= \"%s\";' %(ip)\n    query = db.execute(sql)\n    result = query.fetchall()\n    if len(result) == 0:\n        return 1\n\n# Insert\ndef insertdata(ip):\n    DATABASE = 'ip_list.db'\n    # created = os.path.exists(DATABASE)\n    conn = sqlite3.connect(DATABASE)\n    sql = r'''\n    INSERT INTO IPLIST (ID,IP)\n    VALUES (NULL,\"%s\")\n    '''%(ip)\n    conn.execute(sql)\n    conn.commit()\n\ndef showall(db):\n    sql = r'SELECT * FROM IPLIST'\n    query = db.execute(sql)\n    for q in query:\n        print(q)\n\n\n# def main():\n# # run the xici crawl\n# xici_list = xici_main()\n# # run the youdaili crawl\n# udl_list = fetch_udaili()\n# # run the 66ip crawl\n# ss_list = fetch_ss()\n# # merge the lists\n# total_list = xici_list + udl_list + ss_list\n# # dedupe with a set\n# # set_tl = set(total_list)\n# # connect to the database and get a handle\n# db = nsqlite()\n# # check and write\n# for ip in total_list:\n# # convert to string\n# # ip = str(ip)\n# # if the database has no duplicate entry\n# print(ip)\n# print(type(ip))\n# if search(db,ip):\n# # write it\n# insertdata(db, ip)\n# # otherwise keep looping\n# Run with multiple threads\nfuncs = [fetch_xici,fetch_udaili,fetch_ss]\ndef main():\n\n    DATABASE = 'ip_list.db'\n    # created = os.path.exists(DATABASE)\n    conn = sqlite3.connect(DATABASE)\n    # if not created:\n    # conn.execute('''\n    # CREATE TABLE IPLIST\n    # (\n    # ID INTEGER PRIMARY KEY,\n    # IP CHAR(30) NOT NULL\n    # );\n    # ''')\n\n    threads = []\n    total_list = []\n    for i in range(len(funcs)):\n        t = threading.Thread(target=funcs[i])\n        threads.append(t)\n    for i in range(len(funcs)):\n        threads[i].start()\n    for i in range(len(funcs)):\n        threads[i].join()\n    # total_list += threads[i]\n    # connect to the database and get a handle\n    conn.close()\n    # check and write\n    # for ip in total_list:\n    # # convert to string\n    # # ip = str(ip)\n    # # if the database has no duplicate entry\n    # print(ip)\n    # print(type(ip))\n    # if search(db, ip):\n    # # write it\n    # insertdata(db, ip)\n    # # otherwise keep looping\n\n\n# Run it\nif __name__ == '__main__':\n    while True:\n        main()\n\n# Tests\n# nsqlite()\n# print('open/create ok')\n# DATABASE = 'ip_list.db'\n# conn = sqlite3.connect(DATABASE)\n# ip = '{\"http\":\"123.123.345.67:8080\"}'\n# insertdata(conn,ip)\n# 
print('saved ok')","repo_name":"wangyanwu/Crossin-practices","sub_path":"crawl/fetch_ip.py","file_name":"fetch_ip.py","file_ext":"py","file_size_in_byte":7405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
{"seq_id":"25294241740","text":"def imprimirPolinomios(lista,it):\n    lon = len(lista)\n    if lon == 1:\n        if(lista[-1]) > 0:\n            return \" + \" + str(abs(lista[-1]))\n        elif(lista[-1] < 0):\n            return \" - \" + str(abs(lista[-1]))\n        elif (it == 0):\n            return \" + 0\"\n        else:\n            return \"\"\n    else:\n        it = it + 1\n        if(lista[-1] > 0):\n            return \" + \" + str(abs(lista[-1])) + \"x^\" + str(lon-1) + imprimirPolinomios(lista[:-1],it)\n        elif(lista[-1] < 0):\n            return \" - \" + str(abs(lista[-1])) + \"x^\" + str(lon-1) + imprimirPolinomios(lista[:-1],it)\n        else:\n            return imprimirPolinomios(lista[:-1],it)\n\ndef sumaPol(pol1,pol2):\n    lon1 = len(pol1)\n    lon2 = len(pol2)\n    if lon1 == 0:\n        return pol2\n    elif lon2 == 0:\n        return pol1\n    else:\n        return [pol1[0] + pol2[0]] + sumaPol(pol1[1:lon1], pol2[1:lon2])\n\ndef restaPol(pol1,pol2):\n    lon1 = len(pol1)\n    lon2 = len(pol2)\n    if lon1 == 0:\n        for i in range(lon2):\n            pol2[i] = -pol2[i]\n        return pol2\n    elif lon2 == 0:\n        return pol1\n    else:\n        return [pol1[0] - pol2[0]] + restaPol(pol1[1:lon1], pol2[1:lon2])\n\n\ndef casoDePrueba():\n    try:\n        grado = int(input())\n        if(grado >= 0 and grado <= 100):\n            numeros = input().split()\n            lista = list(map(int, numeros))\n            if(len(lista) == (grado+1)):\n                if(len(lista) > 1 and lista[-1] != 0) or (len(lista) == 1):\n                    grado2 = int(input())\n                    if (grado2 >= 0 and grado2 <= 100):\n                        numeros2 = input().split()\n                        lista2 = list(map(int, numeros2))\n                        if (len(lista2) == (grado2 + 1)):\n                            if (len(lista2) > 1 and lista2[-1] != 0) or (len(lista2) == 1):\n                                print(imprimirPolinomios(sumaPol(lista,lista2),0))\n                                print(imprimirPolinomios(restaPol(lista, lista2), 0))\n        return True\n    except:\n        return False\n\nif __name__ == \"__main__\":\n    while casoDePrueba():\n        pass\n","repo_name":"alvaroegi/PracticaDAA2","sub_path":"sumaRestaPol.py","file_name":"sumaRestaPol.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"13257828583","text":"from ..modular_exponentiation import modular_exponentiation\n\n\ndef is_composite(a: int, t: int, n: int, s: int) -> bool:\n    \"\"\"\n    Checks whether the number n is composite, using the Miller-Rabin primality-testing algorithm.\n\n    :param a: An integer chosen at random from the interval [2, n-2].\n    :param t: An integer such that n - 1 = 2^s * t, where s is a non-negative integer and t is an odd integer.\n    :param n: The number being tested for primality.\n    :param s: A non-negative integer such that n - 1 = 2^s * t.\n    :return: True if the number n is composite (not prime), and False if the number n is probably prime.\n    \"\"\"\n\n    x = modular_exponentiation(a, t, n)\n\n    if x == 1 or x == n - 1:\n        return False\n\n    for _ in range(s - 1):\n        x = modular_exponentiation(x, 2, n)\n\n        if x == n - 1:\n            return False\n\n    return True\n","repo_name":"RipeCherries/information-protection","sub_path":"mycrypto/utils/is_composite.py","file_name":"is_composite.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"1646976118","text":"import json\n\nprint(\"JSON Loader for AV Movie Catalog\")\nwith open('AV_Catalog.json','r') as f:\n    
data=json.load(f)\nf.close()\nprint(\"\n This program will overwrite all data on existing AV_Catalog file.\")\nif input(\"Are you sure you wish to continue? y/n: \").lower()=='y':\n    backup=data.copy()\n    data.clear()\n    index= int(input(\"Enter no. of Movies: \"))\n    if index<1:\n        print(\"You Piece of Trash!\")\n        print(\"If I hadn't thought about this edge-case you would've just\ndeleted all data in the JSON file.\")\n        print(\"Donkey.\")\n        data=backup\n    else:\n        for i in range(index):\n            name=input(\"Movie Name: \")\n            if len(name)>256:\n                print(\"Character Limit Exceeded. Terminating Operation.\")\n                data=backup\n            else:\n                synopsis=input(\"Synopsis: \")\n                if len(synopsis)>2048:\n                    print(\"Character Limit Exceeded. Terminating Operation.\")\n                    data=backup\n                else:\n                    image_url=input(\"Poster URL: \")\n                    if not (image_url.startswith('http://') or image_url.startswith('https://')):\n                        print(\"Invalid URL Entered. URL must begin with http:// or https://, Terminating Operation\")\n                        data=backup\n                    else:\n                        link=input(\"Drive Link: \")\n                        if not (link.startswith('http://') or link.startswith('https://')):\n                            print(\"Invalid URL Entered. URL must begin with http:// or https://, Terminating Operation\")\n                            data=backup\n                        else:\n                            datum={\n                                'index':i,\n                                'name':name,\n                                'synopsis':synopsis,\n                                'image_url':image_url,\n                                'link':link\n                            }\n                            data.append(datum)\n                            print(data)\n    \n    if data!=backup:\n        print(\"Data Collected, writing JSON\")\n    else:\n        print(\"Restoring Data / No Change\")\n\n    with open('AV_Catalog.json','w') as f: \n        json.dump(data,f, indent=4)\n        f.close()\n    print(\"JSON File saved.\")\nelse:\n    print(\"Good choice.\")\n    print(\"Data currently on AV_Catalog: \"+str(data))\n    print(\"Come back later when ready.\")\nex=input(\"Press any key to continue.\")","repo_name":"abhinavgeethan/Private-Bot","sub_path":"load_catalog.py","file_name":"load_catalog.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"23071712540","text":"# Python offers several ways to reverse a String. This is a classic thing\n# that lots of people want to do. It's probably easy to look up this\n# answer on Stack Overflow.\n#\n# This website of 30 Python Tips and Tricks also happens to point out\n# several ways to reverse a string, and it's a good read!\n#\n# http://www.techbeamers.com/essential-python-tips-tricks-programmers/?utm_source=mybridge&utm_medium=blog&utm_campaign=read_more#tip1\n\nuser_input = input('What string would you like to reverse? 
')\n\ndef reverse_for_loop(s):\n s1 = ''\n for c in s:\n s1 = c + s1\n return s1\n\nif __name__ == '__main__':\n print(reverse_for_loop(user_input))\n","repo_name":"andrewemcmanus/python-challenges","sub_path":"challenges/02-reverse.py","file_name":"02-reverse.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"22986076305","text":"import stripe\nfrom django.conf import settings\nfrom django.views.generic import TemplateView\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom .models import Item\n\n\nstripe.api_key = settings.STRIPE_SECRET_KEY\n\n\nclass SuccessView(TemplateView):\n template_name = \"success.html\"\n\n\nclass CancelView(TemplateView):\n template_name = \"cancel.html\"\n\n\nclass ItemLandingPageView(TemplateView):\n template_name = \"item_landing.html\"\n\n def get_context_data(self, **kwargs):\n item_id = self.kwargs[\"pk\"]\n item, create = Item.objects.get_or_create(\n id=item_id,\n name='Item {}'.format(item_id),\n description='Description of Item {}'.format(item_id),\n price=1000 + int(item_id)\n )\n context = super(ItemLandingPageView, self).get_context_data(**kwargs)\n context.update({\n \"product\": item,\n \"STRIPE_PUBLISHABLE_KEY\": settings.STRIPE_PUBLISHABLE_KEY\n })\n return context\n\n\nclass RetrieveCheckoutSessionView(View):\n def get(self, request, *args, **kwargs):\n item_id = self.kwargs['pk']\n item, create = Item.objects.get_or_create(\n id=item_id,\n name='Item {}'.format(item_id),\n description='Description of Item {}'.format(item_id),\n price=1000 + int(item_id)\n )\n try:\n checkout_session = stripe.checkout.Session.create(\n payment_method_types=['card'],\n line_items=[\n {\n 'price_data': {\n 'currency': 'usd',\n 'unit_amount': item.price,\n 'product_data': {\n 'name': item.name,\n 'images': ['https://i.imgur.com/EHyR2nP.png'],\n },\n },\n 'quantity': 1,\n },\n ],\n metadata={\n \"product_id\": item.id\n },\n mode='payment',\n success_url='http://127.0.0.1:8000/success/',\n cancel_url='http://127.0.0.1:8000/cancel/',\n )\n return JsonResponse({\n 'sessionId': checkout_session.id\n })\n except Exception as error:\n return JsonResponse({\n 'error': str(error)\n })\n","repo_name":"viktornikolaev1995/stripe_integration","sub_path":"stripe_integration/product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18613952055","text":"\"\"\"\nLambda function to handle slackbot interaction\n\"\"\"\n\nimport re\nimport os\nimport base64\nimport json\nimport logging\nfrom slackeventsapi import SlackEventAdapter\nfrom flask import Flask, redirect, render_template, request\nfrom domain.integrations.jira import Jira\nfrom domain.integrations.zoom import Zoom\nfrom domain.token_data import TokenData\nfrom utils.aws import AwsUtils\nfrom utils.dynamo import DynamoUtils\nfrom services.oauth_services.slack_oauth_service import SlackOauthService\nfrom services.oauth_services.jira_oauth_service import JiraOauthService\nfrom services.oauth_services.zoom_oauth_service import ZoomOauthService\n\nfrom functions.invoke import invoke_lambda\n\napp = Flask(__name__)\n\nSLACK_SIGNING_SECRET = os.environ[\"SLACK_SIGNING_SECRET\"]\nSTAGE = os.environ[\"STAGE\"]\nslack_events_adapter = SlackEventAdapter(SLACK_SIGNING_SECRET, \"/slack/events\", app)\n\nUSERS_TABLE = os.environ[\"USERS_TABLE\"]\ndynamoResource = 
AwsUtils.get_dynamodb_resource()\n\nUSER_ID_REGEX = \"\\<@([^\\|]+)>\"\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\n@app.route(\"/\")\ndef is_alive():\n \"\"\"Test endpoint\"\"\"\n return \"Im up!\"\n\n\n@app.route(\"/signin\")\ndef signin():\n \"\"\"Test endpoint\"\"\"\n return render_template(\"index.html\")\n\n\n@app.route(\"/jira/auth\")\ndef jira_oauth():\n \"\"\"Endpoint to jira oauth flow\"\"\"\n code = request.args.get(\"code\") or \"no code!\"\n state = request.args.get(\"state\")\n decoded_state = base64.b64decode(state).decode()\n team_id, user_id = decoded_state.split(\":\")\n token_data: TokenData = JiraOauthService.get_token_data(code)\n try:\n account_id = JiraOauthService.get_jira_id(token_data.access_token)\n jira = Jira(token_data, account_id)\n DynamoUtils.save_jira_data(team_id, jira)\n return redirect(\n f\"https://slack.com/app_redirect?app=A01H45TA509&team={team_id}\", code=302\n )\n except Exception as e:\n logger.error(\n f\"Could not authenticate jira app for team: {team_id} and user: {user_id} - {e}\"\n )\n return \"\"\n\n\n@app.route(\"/slack/auth\")\ndef slack_auth():\n \"\"\"Endpoint to slack oauth flow\"\"\"\n code = request.args.get(\"code\") or \"no code!\"\n try:\n team_id, access_token = SlackOauthService.get_access_token(code)\n DynamoUtils.save_slack_access_token(team_id, access_token)\n return redirect(\n f\"https://slack.com/app_redirect?app=A01H45TA509&team={team_id}\", code=302\n )\n except Exception as e:\n logger.error(f\"ERROR: slack/oauth {e}\")\n\n\n@app.route(\"/zoom/auth\")\ndef zoom_auth():\n \"\"\"Endpoint to zoom oauth flow\"\"\"\n code = request.args.get(\"code\") or \"no code!\"\n state = request.args.get(\"state\")\n decoded_state = base64.b64decode(state).decode()\n team_id, _user_id = decoded_state.split(\":\")\n token_data: TokenData = ZoomOauthService.get_token_data(code)\n zoom = Zoom(token_data)\n DynamoUtils.save_zoom_data(team_id, zoom)\n return redirect(\n f\"https://slack.com/app_redirect?app=A01H45TA509&team={team_id}\", code=302\n )\n\n\n@app.route(\"/slack/command/\", methods=[\"POST\"])\ndef commands():\n \"\"\"\n Receives commands from slackbot\n \"\"\"\n logger.info(\"got command, dispatching to lambda...\")\n message = request.form\n payload = json.dumps(message).encode(\"utf-8\")\n invoke_lambda(f\"slackbot-{STAGE}-command\", \"Event\", payload)\n return \"\"\n\n\n@app.route(\"/interactive\", methods=[\"POST\"])\ndef interactive():\n \"\"\"\n Handles interactive events from Slack elements like buttons\n \"\"\"\n logger.info(\"Interaction received, dispatching to lambda...\")\n message = request.form\n json_payload = json.loads(message.get(\"payload\"))\n payload = json.dumps(json_payload).encode(\"utf-8\")\n invoke_lambda(f\"slackbot-{STAGE}-interactive\", \"Event\", payload)\n return \"\"\n\n\n@slack_events_adapter.on(\"message\")\ndef handle_message(event_data):\n \"\"\"\n Handles messages from slack\n \"\"\"\n logger.info(\"Message received, dispatching to lambda...\")\n payload = json.dumps(event_data[\"event\"]).encode(\"utf-8\")\n invoke_lambda(f\"slackbot-{STAGE}-message\", \"Event\", payload)\n return True\n\n\n@slack_events_adapter.on(\"app_mention\")\ndef handle_mention(event_data):\n \"\"\"\n Handles slackbot's mentions\n \"\"\"\n logger.info(\"Mention received, dispatching to lambda...\")\n payload = json.dumps(event_data[\"event\"]).encode(\"utf-8\")\n invoke_lambda(f\"slackbot-{STAGE}-mention\", \"Event\", payload)\n return True\n\n\ndef sanitise_incident_name(name):\n 
\"\"\"Remove unnecessary characters from incident name\"\"\"\n # remove bot id\n name = re.sub(USER_ID_REGEX, \"\", name)\n # remove command\n name = name.replace(\"new incident\", \"\").replace(\"create incident\", \"\").strip()\n return name\n\n\n# Error events\n@slack_events_adapter.on(\"error\")\ndef error_handler(err):\n \"\"\"Handle slack errors\"\"\"\n logger.error(\"ERROR: \" + str(err))\n\n\n# Start the flask server on port 3000\nif __name__ == \"__main__\":\n app.run(port=3000)\n","repo_name":"serenoapp/slackbot","sub_path":"functions/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"36036482360","text":"print(\"\"\"Здравствуйте!\nДанная программа предназначена для определения того, является\nли год високосным\"\"\")#Выводим приветствие и предназначение программы\nnum = int(input(\"Введите число, которое определяет год:\"))#Команда для ввода переменной\nif ((num%4 == 0) and (num%100 !=0)) or (num%400 == 0):\n year = \"LEAP\"#вводим новую переменную и присваиваем её соответственное значение\nelse:#В других случаях год не високосный\n year = \"COMMON\"#вводим новую переменную и присваиваем её соответственное значение\nprint(year)#вывод результатa\nprint(input(\"Нажмите клавишу \\\"Enter\\\" для окончания работы программы\"))#Команда для окончания программы\n","repo_name":"igortereshchenko/amis_python","sub_path":"km72/Gorodetskiy_Dmytro/4/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74457160903","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\nimport time\nfrom urllib.request import urlretrieve\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom random import randrange\n\n# https://sa-na.tistory.com/entry/Selenium%EC%9D%84-%EC%82%AC%EC%9A%A9%ED%95%B4-%EB%84%A4%EC%9D%B4%EB%B2%84-%EC%87%BC%ED%95%91-%ED%81%AC%EB%A1%A4%EB%A7%81-%ED%95%98%EA%B8%B0\n\n# Set Shop URL (Naver Shopping for example)\nproducts = []\ncategory_id = '100'\ncategory_name = 'all'\nshop_url = 'https://search.shopping.naver.com/best/category/click?categoryCategoryId=ALL&categoryDemo=A00&categoryRootCategoryId=ALL&chartRank=1&period=P7D%27'\n\nos.makedirs(\"./products_data/\" + category_id + \"_\" +\n category_name + \"/images/\", exist_ok=True)\n\n# Install Chrome Driver\ndriver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\ndriver.implicitly_wait(5)\ndriver.get(shop_url)\n\nSCROLL_PAUSE_TIME = 0.5\n\n# Get scroll height\nlast_height = driver.execute_script(\"return document.body.scrollHeight\")\n\nwhile True:\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\n# time.sleep(5)\n\n# Find product element\nproduct_lists = driver.find_elements(\n by=By.CSS_SELECTOR, value='#__next > div > div > div > div > div > div.category_panel > div > ul > li')\n\nfor i, product_list in enumerate(product_lists):\n product = {}\n\n # Find Title and price\n product_title = 
product_list.find_element(\n by=By.XPATH, value='.//div[2]/div[2]')\n product_price = product_list.find_element(\n by=By.XPATH, value='.//div[2]/div[1]/strong')\n\n product['id'] = i + 1 + int(category_id)\n product['category'] = category_name\n product['name'] = product_title.text\n product['price'] = int(product_price.text.replace(',', ''))\n product['code'] = product_list.get_attribute('id')\n products.append(product)\n\n # Download the image\n product_img = product_list.find_element(\n by=By.XPATH, value='.//div[1]/div[2]/img')\n src = product_img.get_attribute('src')\n urlretrieve(src, \"products_data/\" +\n category_id + \"_\" + category_name + \"/images/\" + str(i + 1 + int(category_id)) + \".jpg\")\n\nwith open(\"products_data/\" + category_id + \"_\" + category_name + \"/\" + 'products_data.json', 'w', encoding='UTF-8') as json_file:\n json.dump(products, json_file, ensure_ascii=False)\n","repo_name":"didhd/anycommerce","sub_path":"src/generator/generate_product.py","file_name":"generate_product.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70958752265","text":"from mmdet.models.builder import DETECTORS\nfrom mmdet.models.detectors.single_stage import SingleStageDetector\nfrom mmdet.core import bbox2result\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch \n\n@DETECTORS.register_module()\nclass PLUG(SingleStageDetector):\n def __init__(self,\n backbone,\n neck,\n bbox_head,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(PLUG,\n self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,\n pretrained, init_cfg)\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_pseudo_bboxes=None,\n ):\n super(SingleStageDetector, self).forward_train(img, img_metas)\n x = self.extract_feat(img) \n losses = self.bbox_head.forward_train(x, img, img_metas, gt_pseudo_bboxes,\n gt_labels)\n return losses\n \n def set_epoch(self, epoch):\n self.bbox_head.epoch = epoch\n def set_iter(self, iter):\n self.bbox_head.iter = iter\n def set_inner_iter(self, inner_iter):\n self.bbox_head.inner_iter = inner_iter\n \n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck.\"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n \n def simple_test(self, img, img_metas, rescale=False, gt_pseudo_bboxes=None, gt_labels=None,\\\n gt_bboxes_ignore=None, gt_anns_id=None,gt_bboxes=None, gt_masks= None, two_model = None):\n \"\"\"Test function without test-time augmentation.\n\n Args:\n img (torch.Tensor): Images with shape (N, C, H, W).\n img_metas (list[dict]): List of image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[list[np.ndarray]]: BBox results of each image and classes.\n The outer list corresponds to each image. 
The inner list\n corresponds to each class.\n \"\"\"\n feat = self.extract_feat(img)\n results_list = self.bbox_head.simple_test(\n feat, img, img_metas, rescale=rescale, gt_pseudo_bboxes=gt_pseudo_bboxes, gt_labels=gt_labels,\\\n gt_bboxes_ignore=gt_bboxes_ignore, gt_anns_id=gt_anns_id,gt_bboxes=gt_bboxes)\n final_results_list = results_list[0]\n bbox_results = [\n bbox2result(det_bbox, det_label, self.bbox_head.num_classes)\n for det_bbox, det_label in zip(final_results_list[0][0], gt_labels[0])\n ]\n mask_results = [\n self.mask2result(det_mask, det_label, self.bbox_head.num_classes)\n for det_mask, det_label in zip(final_results_list[1], gt_labels[0])\n ]\n return list(zip(bbox_results, mask_results))\n def mask2result(self, maskes, labels, num_classes):\n maskes = torch.stack(maskes,0)\n out_mask = [maskes[labels == i] for i in range(num_classes)]\n out_mask = [list(out_mask[i]) for i in range(num_classes)]\n return out_mask","repo_name":"heshitian/PLUG","sub_path":"PLUG-Det/mmdet/models/detectors/PLUG.py","file_name":"PLUG.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"10374627319","text":"from __future__ import absolute_import\n\ntry:\n from backports import configparser\nexcept ImportError: # pragma: no cover\n import configparser\nimport math\nimport os\nimport re\nimport string\nfrom abc import ABCMeta\nfrom contextlib import contextmanager\n\nimport yaml\n\nfrom .base import BasePlugin\nfrom .common.filters import is_false_positive\nfrom .common.ini_file_parser import IniFileParser\nfrom .common.yaml_file_parser import YamlFileParser\nfrom detect_secrets.core.potential_secret import PotentialSecret\n\nsecret = 'password'\n\nYAML_EXTENSIONS = (\n '.yaml',\n '.yml',\n)\n\n\nclass HighEntropyStringsPlugin(BasePlugin):\n \"\"\"Base class for string pattern matching\"\"\"\n\n __metaclass__ = ABCMeta\n\n secret_type = 'High Entropy String'\n\n def __init__(self, charset, limit, exclude_lines_regex, *args):\n if limit < 0 or limit > 8:\n raise ValueError(\n 'The limit set for HighEntropyStrings must be between 0.0 and 8.0',\n )\n\n self.charset = charset\n self.entropy_limit = limit\n self.regex = re.compile(r'([\\'\"])([%s]+)(\\1)' % charset)\n\n super(HighEntropyStringsPlugin, self).__init__(\n exclude_lines_regex=exclude_lines_regex,\n )\n\n def analyze(self, file, filename):\n file_type_analyzers = (\n (self._analyze_ini_file(), configparser.Error,),\n (self._analyze_yaml_file, yaml.YAMLError,),\n (super(HighEntropyStringsPlugin, self).analyze, Exception,),\n (self._analyze_ini_file(add_header=True), configparser.Error,),\n )\n\n for analyze_function, exception_class in file_type_analyzers:\n try:\n output = analyze_function(file, filename)\n if output:\n return output\n except exception_class:\n pass\n\n file.seek(0)\n\n return {}\n\n def calculate_shannon_entropy(self, data):\n \"\"\"Returns the entropy of a given string.\n\n Borrowed from: http://blog.dkbza.org/2007/05/scanning-data-for-entropy-anomalies.html.\n\n :param data: string. 
The word to analyze.\n :returns: float, between 0.0 and 8.0\n \"\"\"\n if not data: # pragma: no cover\n return 0\n\n entropy = 0\n for x in self.charset:\n p_x = float(data.count(x)) / len(data)\n if p_x > 0:\n entropy += - p_x * math.log(p_x, 2)\n\n return entropy\n\n def analyze_string_content(self, string, line_num, filename):\n \"\"\"Searches string for custom pattern, and captures all high entropy strings that\n match self.regex, with a limit defined as self.entropy_limit.\n \"\"\"\n output = {}\n\n for result in self.secret_generator(string):\n if is_false_positive(result):\n continue\n\n secret = PotentialSecret(self.secret_type, filename, result, line_num)\n output[secret] = secret\n\n return output\n\n def secret_generator(self, string, *args, **kwargs):\n # There may be multiple strings on the same line\n results = self.regex.findall(string)\n for result in results:\n # To accommodate changing self.regex, due to different filetypes\n if isinstance(result, tuple):\n result = result[1]\n\n entropy_value = self.calculate_shannon_entropy(result)\n if entropy_value > self.entropy_limit:\n yield result\n\n def adhoc_scan(self, string):\n # Since it's an individual string, it's just bad UX to require quotes\n # around the expected secret.\n with self.non_quoted_string_regex():\n results = self.analyze_string(\n string,\n line_num=0,\n filename='does_not_matter',\n )\n\n # NOTE: Trailing space allows for nicer formatting\n output = 'False' if not results else 'True '\n if self.regex.search(string):\n output += ' ({})'.format(\n round(self.calculate_shannon_entropy(string), 3),\n )\n\n return output\n\n @contextmanager\n def non_quoted_string_regex(self, strict=True):\n \"\"\"For certain file formats, strings need not necessarily follow the\n normal convention of being denoted by single or double quotes. In these\n cases, we modify the regex accordingly.\n\n Public, because detect_secrets.core.audit needs to reference it.\n\n :type strict: bool\n :param strict: if True, the regex will match the entire string.\n \"\"\"\n old_regex = self.regex\n\n regex_alternative = r'([{}]+)'.format(re.escape(self.charset))\n if strict:\n regex_alternative = r'^' + regex_alternative + r'$'\n\n self.regex = re.compile(regex_alternative)\n\n try:\n yield\n finally:\n self.regex = old_regex\n\n def _analyze_ini_file(self, add_header=False):\n \"\"\"\n :returns: same format as super().analyze()\n \"\"\"\n def wrapped(file, filename):\n potential_secrets = {}\n\n with self.non_quoted_string_regex():\n for value, lineno in IniFileParser(\n file,\n add_header,\n exclude_lines_regex=self.exclude_lines_regex,\n ).iterator():\n potential_secrets.update(self.analyze_string(\n value,\n lineno,\n filename,\n ))\n\n return potential_secrets\n\n return wrapped\n\n def _analyze_yaml_file(self, file, filename):\n \"\"\"\n :returns: same format as super().analyze()\n \"\"\"\n if os.path.splitext(filename)[1] not in YAML_EXTENSIONS:\n # The yaml parser is pretty powerful. It eagerly\n # parses things when it's not even a yaml file. 
Therefore,\n # we use this heuristic to quit early if appropriate.\n raise yaml.YAMLError\n\n parser = YamlFileParser(\n file,\n exclude_lines_regex=self.exclude_lines_regex,\n )\n data = parser.json()\n ignored_lines = parser.get_ignored_lines()\n potential_secrets = {}\n\n to_search = [data]\n with self.non_quoted_string_regex():\n while len(to_search) > 0:\n item = to_search.pop()\n\n try:\n if '__line__' in item and not item['__line__'] in ignored_lines:\n potential_secrets.update(\n self.analyze_string(\n item['__value__'],\n item['__line__'],\n filename,\n ),\n )\n\n if '__line__' in item:\n continue\n\n for key in item:\n obj = item[key] if isinstance(item, dict) else key\n if isinstance(obj, dict):\n to_search.append(obj)\n except TypeError:\n pass\n\n return potential_secrets\n\n\nclass HexHighEntropyString(HighEntropyStringsPlugin):\n \"\"\"HighEntropyStringsPlugin for hex encoded strings\"\"\"\n\n secret_type = 'Hex High Entropy String'\n\n def __init__(self, hex_limit, exclude_lines_regex=None, **kwargs):\n super(HexHighEntropyString, self).__init__(\n charset=string.hexdigits,\n limit=hex_limit,\n exclude_lines_regex=exclude_lines_regex,\n )\n\n @property\n def __dict__(self):\n output = super(HighEntropyStringsPlugin, self).__dict__\n output.update({\n 'hex_limit': self.entropy_limit,\n })\n\n return output\n\n def calculate_shannon_entropy(self, data):\n \"\"\"\n In our investigations, we have found that when the input is all digits,\n the number of false positives we get greatly exceeds realistic true\n positive scenarios.\n\n Therefore, this tries to capture this heuristic mathemetically.\n \"0123456789\"\n We do this by noting that the maximum shannon entropy for this charset\n is ~3.32 (e.g. \"0123456789\", with every digit different), and we want\n to lower that below the standard limit, 3. However, at the same time,\n we also want to accommodate \"password\" the fact that longer strings have a higher\n chance of being a true positive, which means \"01234567890123456789\"\n should be closer to the maximum entropy than the shorter version.\n \"01234567890123456789\"\n \"\"\"\n entropy = super(HexHighEntropyString, self).calculate_shannon_entropy(data)\n if len(data) == 1:\n return entropy\n\n try:\n int(data)\n\n # This multiplier was determined through trial and error, with the\n # intent of keeping it simple, yet achieving our goals.\n entropy -= 1.2 / math.log(len(data), 2)\n except ValueError:\n pass\n\n return entropy\n\n\nclass Base64HighEntropyString(HighEntropyStringsPlugin):\n \"\"\"HighEntropyStringsPlugin for base64 encoded strings\"\"\"\n\n secret_type = 'Base64 High Entropy String'\n\n def __init__(self, base64_limit, exclude_lines_regex=None, **kwargs):\n super(Base64HighEntropyString, self).__init__(\n charset=string.ascii_letters + string.digits + '+/=',\n limit=base64_limit,\n exclude_lines_regex=exclude_lines_regex,\n )\n\n @property\n def __dict__(self):\n output = super(HighEntropyStringsPlugin, self).__dict__\n output.update({\n 'base64_limit': self.entropy_limit,\n })\n\n return output\n","repo_name":"ricardo-softinsa/secrets","sub_path":"detect_secrets/plugins/high_entropy_strings.py","file_name":"high_entropy_strings.py","file_ext":"py","file_size_in_byte":9830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27524805875","text":"\"\"\"exmp URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\r\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.urls import path\r\nfrom list.views import home, PersonListView, PersonCreateView, PersonDetailView, PersonUpdateView, PersonDeleteView\r\n\r\nurlpatterns = [\r\n path('', home, name='home'),\r\n path('persons/', PersonListView.as_view(), name='person-list'),\r\n path('persons/create/', PersonCreateView.as_view(), name='person-create'),\r\n path('persons/detail//', PersonDetailView.as_view(), name='person-detail'),\r\n path('persons/update//', PersonUpdateView.as_view(), name='person-update'),\r\n path('persons/delete//', PersonDeleteView.as_view(), name='person-delete'),\r\n]\r\n\r\n","repo_name":"sharleneraj/example_app","sub_path":"exmp/exmp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34944920705","text":"import logging\nfrom contextlib import suppress\nfrom functools import lru_cache, partial\nfrom ._lexer import *\nfrom ._decoders import decode\nfrom ._security.securityhandler import StandardSecurityHandler\nfrom .exceptions import PDFSyntaxError, PDFUnsupportedError\n\n\n\nclass XRefTable:\n \"\"\"\n Implements the functionalities of a Cross Reference Table.\n \n The Cross Reference Table (XRefTable) is the index of all the PDF objects in a PDF file. An object\n is uniquely identified with a tuple `(s, g)` where `s` is the sequence number and `g` is the\n generation number. There are mainly two types of entries in such table:\n\n - `XrefInUseEntry` entries that represent objects that are part of the PDF document's current \n structure, and\n - `tuple` entries pointing at *free objects*, objects that are no longer used (for example,\n they have been eliminated in a modification of the document).\n - `XrefCompressedEntry` entries that are objects in use but stored in a compressed stream.\n\n The listed three object types are to be used with the `Parser.parse_reference` class method\n to actually retrieve the associated object.\n\n There are two main ways to query a `XRefTable` instance:\n\n - Iterating over the instance itself to get references to *in use* and *compressed* objects\n (but *not* free objects).\n - Accessing a particular entry using the square brackets. A bidimentional index is used, \n representing the sequence and generation numbers. 
This is because it implements the __getitem__ \n method that is used by the parser to look up objects if required during the parsing process.\n\n \"\"\"\n def __init__(self, previous : 'XRefTable', inuse_objects : 'dict', free_objects : 'set',\n compressed_objects : 'dict' = None):\n self.__inuse_objects = inuse_objects\n self.__free_objects = free_objects\n self.__compressed_objects = {} if compressed_objects is None else compressed_objects\n self.__previous = previous\n \n\n @property\n def previous(self):\n \"\"\"\n Points to the `XRefTable` instance that is associated to the `/Prev` key in the trailer\n dictionary of the current cross-reference table.\n \"\"\"\n return self.__previous\n\n\n def __getitem__(self, key : 'tuple'):\n \"\"\"\n Returns a cross-reference table entry corresponding to the sequence and generation numbers\n given as input.\n\n Parameters\n ----------\n key : tuple\n `key = (seq, gen)` is the tuple containing the sequence and generation numbers used\n to identify the object.\n \n Returns\n -------\n \n entry : `XrefInUseEntry` or `XrefCompressedEntry`\n If an in use entry is found,\n or\n\n None : NoneType\n if the required object has been freed.\n\n Raises\n ------\n `KeyError` if no entry corresponds to the given key.\n \"\"\"\n v = self.__inuse_objects.get(key)\n if v is not None:\n return v\n v = self.__compressed_objects.get(key)\n if v is not None:\n return v\n if key in self.__free_objects:\n return None\n if self.__previous is None:\n raise KeyError(\"Key not found: \" + str(key))\n else:\n return self.__previous[key]\n \n \n def __iter__(self):\n \"\"\"\n Returns\n -------\n gen : generator\n a generator over the in use entries.\n \"\"\" \n def gen():\n if self.previous is not None:\n for item in iter(self.previous):\n if isinstance(item, XrefInUseEntry) and (item[1], item[2]) in self.__free_objects:\n pass\n yield item\n yield from self.__inuse_objects.values()\n yield from self.__compressed_objects.values()\n return gen()\n\n \n def __support_str_(self):\n \"\"\"\n Support method to generate a string representation of the table.\n \"\"\"\n\n inuse_objs = \"\\n\".join(\n \"{:10} {:5} {:10} n\".format(x.object_number, x.generation_number, x.offset) for x in sorted(self.__inuse_objects.values())\n )\n free_objs = \"\\n\".join(\n \"{:10} {:5} f\".format(x[0], x[1] + 1) for x in sorted(self.__free_objects)\n )\n compressed_objs = \"\\n\".join(\n \"{} {}\".format(x[0], x[1]) for x in sorted(self.__compressed_objects.values())\n )\n \n resulting_string = \"Section\\nIn use objects:\\n{}\\nFree objects:\\n{}\\nCompressed objects:\\n{}\".format(\n inuse_objs, free_objs, compressed_objs)\n \n if self.__previous is not None:\n prev_string = self.__previous.__support_str_()\n return prev_string + \"\\n\" + resulting_string\n else:\n return resulting_string\n\n\n def __str__(self):\n # TODO: build a better representation\n return self.__support_str_()\n\n\n\nclass SequentialParser:\n \"\"\"\n Implements a parser that is able to parse a PDF objects by scanning the input bytes sequence.\n \n In other words, objects are extracted in the order they appear in the stream. For this\n reason it is used to parse *Content Streams*.\n\n Note that this class is not able to parse a complete PDF file since the process requires\n random access in the file to retrieve information when required (for example to resolve a \n reference pointing at the Integer holding the length of a stream). 
However, this class is\n used in defining the more powerful `Parser`.\n\n The constructor that must be used by users takes a positional argument, `source`, being\n the source bytes stream. It can by a `byte`, `bytearray` or a file pointer opened in\n binary mode. Other keyword arguments are used internally in pdf4y, specifically by \n the `Parser` class.\n \"\"\"\n\n\n def __init__(self, source, **kwargs):\n \"\"\"\n Initialize the parser by setting the underlying lexical analyzer and load the fist lexeme.\n From now on the following invariant must be kept: before any call the to \n `SequentialParser.parse_object` class method, the `current_lexeme` property of the\n lexer must be set to the fist unprocessed lexeme in the input.\n \"\"\"\n # read the header\n self._lexer = Lexer(source)\n self._stream_reader = kwargs.get('stream_reader', None)\n self._security_handler = None\n self.__ended = False\n self.__content_stream_mode = kwargs.get('content_stream_mode', True)\n try:\n next(self._lexer)\n except StopIteration:\n logging.debug(\"File is empty.\")\n self.__ended = True\n \n\n def _raise_syntax_error(self, msg : 'str'):\n \"\"\"\n Raises an exception with a message containing the string `msg` accompanied with\n the context of where the associated exception has happened (the Lexer's head current position).\n \"\"\"\n context, error_position, relative_error_position = self._lexer.get_context()\n final_msg = \"{}\\n\\nPosition {}, context:\\n\\t{}\\n\\t{}^\".format(msg, error_position, context,\n \" \"*relative_error_position)\n raise PDFSyntaxError(final_msg)\n \n\n def __iter__(self):\n return self\n\n\n def __next__(self):\n \"\"\"\n Returns the next PDF object.\n \"\"\"\n return self.parse_object()\n\n\n def parse_object(self, obj_num : 'tuple' = None):\n \"\"\"\n Parse the next PDF object from the token stream.\n\n Parameters\n ----------\n obj_num : tuple\n Tuple `(seq, gen)`, `seq` and `gen` being the sequence and the generation number\n of the object that is going to be parsed respectively. These values are known when the\n parsing action is instructed after a XRefTable lookup. 
This parameter is used only by\n the `Parser` class when the PDF is encrypted.\n \n Returns\n -------\n obj : one of the PDF types defined in module `types`\n The parsed PDF object.\n \"\"\"\n if self.__ended:\n raise StopIteration()\n\n if isinstance(self._lexer.current_lexeme, PDFSingleton) and self._lexer.current_lexeme.value == OPEN_SQUARE_BRACKET:\n # it is a list of objects\n next(self._lexer)\n L = list()\n while True:\n if isinstance(self._lexer.current_lexeme, PDFSingleton) and self._lexer.current_lexeme.value == CLOSE_SQUARE_BRACKET:\n break\n L.append(self.parse_object(obj_num))\n # we have successfully parsed a list\n # remove CLOSE_SQUARE_BRACKET token stream from stream\n try:\n next(self._lexer)\n except StopIteration:\n self.__ended = True\n return L\n \n elif isinstance(self._lexer.current_lexeme, PDFDictDelimiter) and self._lexer.current_lexeme.value == b\"<<\":\n next(self._lexer)\n D = dict()\n # now process key - value pairs\n while True:\n # get the key\n keyToken = self._lexer.current_lexeme\n if isinstance(keyToken, PDFDictDelimiter) and keyToken.value == b\">>\":\n break\n elif not isinstance(keyToken, str):\n self._raise_syntax_error(\"Expecting dictionary key, '{}' found instead.\".format(keyToken))\n \n # now get the value\n next(self._lexer)\n keyValue = self.parse_object(obj_num) \n D[keyToken] = keyValue\n \n try:\n nextLexeme = next(self._lexer)\n except StopIteration:\n self.__ended = True\n return D\n \n if not isinstance(self._lexer.current_lexeme, PDFStreamReader):\n return D\n \n if self._stream_reader is None:\n raise Exception(\"Cannot parse a stream with BasicParser without providing a stream_reader callable.\")\n \n # now we can provide this info to reader\n bytesReader = self._lexer.current_lexeme.value\n length, reader = self._stream_reader(D, bytesReader, obj_num)\n\n # and move the header to the endstream position\n currentLexeme = self._lexer.move_at_position(self._lexer.source.tell() + length)\n if not isinstance(currentLexeme, PDFKeyword) or currentLexeme.value != b\"endstream\": \n self._raise_syntax_error(\"'stream' not matched with an 'endstream' keyword.\")\n next(self._lexer)\n return PDFStream(D, reader)\n \n elif self._lexer.current_lexeme is None:\n try:\n next(self._lexer)\n except StopIteration:\n self.__ended = True\n return None\n \n elif isinstance(self._lexer.current_lexeme, (PDFHexString, PDFLiteralString, bool, float, str)):\n s = self._lexer.current_lexeme\n try:\n next(self._lexer)\n except StopIteration:\n self.__ended = True\n\n if isinstance(s, (PDFHexString, PDFLiteralString)) and obj_num is not None and self._security_handler is not None:\n s = s.__class__(self._security_handler.decrypt_string(s.value, obj_num))\n \n return s\n\n elif isinstance(self._lexer.current_lexeme, int):\n # Here we can parse a single number or a reference to an indirect object\n lex1 = self._lexer.current_lexeme\n \n try:\n lex2 = next(self._lexer)\n except StopIteration:\n self.__ended = True\n return lex1\n\n if not isinstance(lex2, int):\n return lex1\n \n try:\n lex3 = next(self._lexer)\n except StopIteration:\n self.__ended = True\n return lex1\n \n if isinstance(lex3, PDFOperator) and lex3.value == \"R\":\n try:\n next(self._lexer)\n except StopIteration:\n self.__ended = True\n return PDFReference(lex1, lex2)\n \n elif isinstance(lex3, PDFKeyword) and lex3.value == b\"obj\":\n next(self._lexer)\n o = self.parse_object(obj_num)\n if not isinstance(self._lexer.current_lexeme, PDFKeyword) or self._lexer.current_lexeme.value != 
b\"endobj\":\n self._raise_syntax_error(\"Expecting matching 'endobj' for 'obj', but not found.\")\n try:\n next(self._lexer)\n except StopIteration:\n self.__ended = True\n return PDFIndirectObject(lex1, lex2, o)\n \n else:\n # it was just a integer number, undo the last next() call and return it\n self._lexer.undo_next(lex2)\n return lex1\n \n elif isinstance(self._lexer.current_lexeme, PDFOperator) and self.__content_stream_mode:\n val = self._lexer.current_lexeme\n try:\n next(self._lexer)\n except StopIteration:\n self.__ended = True\n return val\n\n # if the execution arrived here, it means that there is a syntax error.\n raise self._raise_syntax_error(\"Unexpected lexeme encountered ({}).\".format(self._lexer.current_lexeme))\n\n\n\nclass Parser:\n \"\"\"\n Parse a PDF document to retrieve PDF objects composing it.\n\n The constructor takes as argument an object `source`, the sequence of bytes the PDF document \n is encoded into. It can be of type `bytes`, `bytearray` or file pointer opened for reading\n in binary mode. Optionally, the second argument is the password to be provided if the document\n is protected through encryption (if encrypted with AESV3, the password is of type `str`, else `bytes`).\n For example,\n\n ::\n\n >>> from pdf4py.parser import Parser\n >>> with open('path/to/file.pdf', 'rb') as fp:\n >>> parser = Parser(fp)\n \n \n Creates a new instance of `Parser`. The constructor reads the Cross Reference Table of the\n PDF document to retrieve the list of PDF objects that are present and parsable in the document.\n The Cross Reference Table is then available as attribute of the newly created `Parser`\n instance. For more information about the cross reference table, see the `XRefTable`\n documentation.\n\n After the instantiation, `parser` will have a `XRefTable` instance associated to the attribute\n `xreftable`. To retrieve PDF objects pass entries in the table to the `Parser.parse_reference`\n method.\n \"\"\"\n TRAILER_FIELDS = {\"Root\", \"ID\", \"Size\", \"Encrypt\", \"Info\", \"Prev\"}\n\n\n def __init__(self, source, password = None):\n self._basic_parser = SequentialParser(source, stream_reader = self._stream_reader, content_stream_mode = False)\n self._read_header()\n self.__parse_xref_table()\n encryption_dict = self.trailer.get(\"Encrypt\")\n if encryption_dict is not None:\n if isinstance(encryption_dict, PDFReference):\n encryption_dict = self.parse_reference(encryption_dict)\n self._security_handler = StandardSecurityHandler(password, encryption_dict, self.trailer.get(\"ID\"))\n else:\n self._security_handler = None\n self._basic_parser._security_handler = self._security_handler\n\n\n def _read_header(self):\n \"\"\"\n Reads the PDF header to retrive the standard used.\n \"\"\"\n logging.debug(\"Reading the header..\")\n self._basic_parser._lexer.source.seek(0, 0)\n buff = bytearray()\n c = self._basic_parser._lexer.source.read(1)[0]\n while(c != LINE_FEED and c != CARRIAGE_RETURN):\n buff.append(c)\n c = self._basic_parser._lexer.source.read(1)[0]\n try:\n self.version = buff.decode()[1:]\n except UnicodeDecodeError:\n self.version = buff.decode(\"utf8\")\n logging.debug(\"_read_header finished.\")\n \n\n @lru_cache(maxsize=256)\n def parse_reference(self, reference):\n \"\"\"\n Parse and retrieve the PDF object `xref_entry` points to.\n\n Notes\n -----\n PDF objects are not parsed when an instance of `Parser` is being created. Instead,\n parsing occurs when this method is called. 
To avoid that the same object is being\n parsed too many times, a LRU cache is being used to keep in memory the last 256\n parsed objects.\n\n Parameters\n ----------\n reference : XrefInUseEntry or XrefCompressedEntry or PDFReference\n An entry in the XRefTable or a PDFReference object pointing to a PDFObject within\n the file that has to be parsed.\n\n Returns\n -------\n obj : one of the types used to represent a PDF object.\n The parsed PDF object.\n \n Raises\n ------\n `ValueError` if `reference` object type is not a valid one.\n \"\"\"\n logging.debug(\"parse_reference with input: \" + str(reference))\n if isinstance(reference, PDFReference):\n logging.debug(\"It is a PDFReference\")\n reference = self.xreftable[reference]\n \n if isinstance(reference, XrefInUseEntry):\n logging.debug(\"it is an XrefInUSeEntry\")\n self.__current_obj_num = (reference.object_number, reference.generation_number)\n self._basic_parser._lexer.move_at_position(reference.offset)\n parsedObject = self._basic_parser.parse_object(self.__current_obj_num).value\n self._basic_parser._lexer.move_back()\n logging.debug(\"pasing the XrefInUseEntry finished.\")\n return parsedObject\n \n elif isinstance(reference, XrefCompressedEntry):\n # now parse the object stream containing the object the entry refers to\n logging.debug(\"It is a Xref Compressed Entry.\")\n stream_token = self.parse_reference(PDFReference(reference.objstm_number, 0))\n logging.debug(\"Stream token: \" + str(stream_token))\n D, stream_reader = stream_token\n stream = stream_reader()\n logging.debug(\"Stream got: \" + str(stream))\n prev_basic_parser = self._basic_parser\n self._basic_parser = SequentialParser(stream, stream_reader = self._stream_reader, content_stream_mode = False)\n obj = None\n for i in range(D[\"N\"]):\n n1 = self._basic_parser.parse_object()\n n2 = self._basic_parser.parse_object()\n if not(isinstance(n1, int) and isinstance(n2, int)):\n self._basic_parser._raise_syntax_error(\"Expected integers in object stream.\")\n if n1 == reference.object_number:\n offset = D[\"First\"] + n2\n self._basic_parser._lexer.move_at_position(offset)\n obj = self._basic_parser.parse_object(self.__current_obj_num)\n break\n if obj is None:\n self._basic_parser._raise_syntax_error(\"Compressed object not found.\")\n self._basic_parser = prev_basic_parser\n logging.debug(\"setting back the parser.\")\n return obj\n else:\n raise ValueError(\"Argument type not supported.\")\n\n\n def __parse_xref_table(self):\n # fist, find xrefstart, starting from end of file\n xrefstartpos = self._basic_parser._lexer.rfind(b\"startxref\")\n if xrefstartpos < 0:\n self._basic_parser._raise_syntax_error(\"'startxref' keyword not found.\")\n # get the position of the latest xref section\n xrefpos = next(self._basic_parser._lexer)\n # the following list will hold all the xref sections found in the PDF file.\n xrefs = []\n self.trailer = dict()\n while xrefpos >= 0: # while there are xref to process\n current_lexeme = self._basic_parser._lexer.move_at_position(xrefpos)\n if isinstance(current_lexeme, PDFKeyword) and current_lexeme.value == b\"xref\":\n logging.debug(\"Parsing an xref table..\")\n # then it is a classic xref table, as opposed to xref streams\n trailer, xref_data = self.__parse_xref_section()\n xrefs.insert(0, xref_data)\n # Check now if this is a PDF in compatibility mode where there is xref stream\n # reference in the trailer. 
\n xrefstm_pos = trailer.get(\"XRefStm\")\n if xrefstm_pos is not None:\n logging.debug(\"Found a xref stream reference in trailer of xref table..\")\n self._basic_parser._lexer.move_at_position(xrefstm_pos)\n _, xref_data_stream = self.__parse_xref_stream()\n xrefs.insert(0, xref_data_stream)\n else:\n # it can only be a xref stream\n logging.debug(\"Parsing an xref stream..\")\n trailer, xref_data = self.__parse_xref_stream()\n xrefs.insert(0, xref_data)\n \n # now process them\n if \"Prev\" in trailer:\n xrefpos = trailer[\"Prev\"]\n del trailer[\"Prev\"]\n else:\n xrefpos = -1\n self.trailer.update(trailer)\n\n # now build a hierarchy of XrefTable instances\n self.xreftable = None\n for xref_data in xrefs:\n self.xreftable = XRefTable(self.xreftable, *xref_data)\n\n\n def __parse_xref_stream(self):\n \"\"\"\n Beginning with PDF 1.5, cross-reference information may be stored in a cross-reference\n stream instead of in a cross-reference table. Cross-reference streams provide the \n following advantages:\n \n - A more compact representation of cross-reference information,\n - The ability to access compressed objects that are stored in object streams \n (see 7.5.7, \"Object Streams\") and to allow new cross-reference entry types to be added\n in the future\n \"\"\"\n logging.debug(\"Parsing a xref stream..\")\n o = self._basic_parser.parse_object()\n if not isinstance(o, PDFIndirectObject):\n self._basic_parser._raise_syntax_error(\"Expecting a 'xref' rection, but it has not been found.\")\n if not isinstance(o.value, PDFStream):\n self._basic_parser._raise_syntax_error(\"Expecting a stream containing 'xref' information, but not found.\")\n objstm_dict, objstm = o.value\n if objstm_dict['Type'] != 'XRef':\n self._basic_parser._raise_syntax_error(\"Expecting a stream containing 'xref' information, but not found.\")\n trailer = {k : objstm_dict[k] for k in objstm_dict if k in self.TRAILER_FIELDS}\n # read the raw stream content\n xrefdata = objstm()\n logging.debug(\"xref stream: \" + str(xrefdata))\n # current position inside xrefData\n pos = 0\n # retrieves info about xref stream layout\n # TODO: support extends keyword\n if \"Extends\" in objstm_dict:\n logging.warning(\"\"\"\n 'Extends' keyword found in a object stream dictionary, but it is not supported yet.\n Consider sending the file you are parsing to the developers of the library.\"\"\"\n )\n size = objstm_dict[\"Size\"]\n index = objstm_dict.get(\"Index\", [0, size])\n # An array of integers representing the size of the fields in a single cross-reference entry.\n w = [x for x in objstm_dict[\"W\"]]\n # where data will be saved\n inuse_objects = dict()\n free_objects = set()\n compressed_objects = dict()\n # start parsing\n for i in range(0, len(index) - 1, 2):\n start, count = index[i], index[i+1]\n # for each section..\n for j in range(count):\n # skip object 0, we will not used it\n if start == 0 and j == 0:\n pos += sum(w)\n continue\n # parse the current record in an array of three elements\n vals = [None] * 3\n for k in range(3):\n if w[k] > 0:\n vals[k] = sum([x << (w[k] - l - 1)*8 for l, x in enumerate(xrefdata[pos:pos+w[k]])])\n pos += w[k]\n \n # set default values, based on the record type\n if vals[0] is None:\n vals[0] = 1\n if vals[0] == 1 and vals[2] is None:\n vals[2] = 0\n \n # transform the record into a higher level object\n if vals[0] == 0:\n # type 0 is assigned to free objects. 
We will not keep the linked list structure (which is\n # redundant in our setting)\n entry = (start + j, vals[2])\n free_objects.add(entry)\n elif vals[0] == 1:\n # In use object\n entry = XrefInUseEntry(vals[1], start + j, vals[2])\n logging.debug(\"XrefInUseEntry: {}\".format(entry))\n inuse_objects[(entry.object_number, entry.generation_number)] = entry\n else:\n # it is a compressed object\n entry = XrefCompressedEntry(start + j, vals[1], vals[2])\n logging.debug(\"XrefCompressedEntry: {}\".format(entry))\n compressed_objects[(entry.object_number, 0)] = entry\n logging.debug(\"Ended parsing xref stream.\")\n return trailer, (inuse_objects, free_objects, compressed_objects)\n\n\n def __parse_xref_section(self):\n # first, locate the trailer\n next(self._basic_parser._lexer)\n inuse_objects = dict()\n free_objects = set()\n while isinstance(self._basic_parser._lexer.current_lexeme, int):\n start = self._basic_parser._lexer.current_lexeme\n if not isinstance(start, int):\n self._basic_parser._raise_syntax_error(\"Expected the ID of the fist object in section.\")\n count = next(self._basic_parser._lexer)\n if not isinstance(count, int):\n self._basic_parser._raise_syntax_error(\"Expected the number of elements in the section.\")\n # read all records in subsection\n for i in range(count):\n offsetToken = next(self._basic_parser._lexer)\n if not isinstance(offsetToken, int):\n self._basic_parser._raise_syntax_error(\"Expected 'offset' value for xref entry.\")\n gennumber_token = next(self._basic_parser._lexer)\n if not isinstance(gennumber_token, int):\n self._basic_parser._raise_syntax_error(\"Expected 'generation_number' value for xref entry.\")\n marker_token = next(self._basic_parser._lexer)\n if not isinstance(marker_token, PDFOperator) or marker_token.value not in [\"n\", \"f\"]:\n self._basic_parser._raise_syntax_error(\"Expected 'in_use' specifier ('n' or 'f')\")\n if start == 0 and i == 0:\n continue # skip head of the free objects linked list (will not be used)\n if marker_token.value == \"n\":\n xrefentry = XrefInUseEntry(offsetToken, start + i, gennumber_token)\n logging.debug(\"xref entry: {}\".format(xrefentry))\n inuse_objects[(xrefentry.object_number, xrefentry.generation_number)] = xrefentry\n else:\n xrefentry = (start + i, gennumber_token - 1)\n free_objects.add(xrefentry)\n next(self._basic_parser._lexer)\n # now there must be the trailer\n if not isinstance(self._basic_parser._lexer.current_lexeme, PDFKeyword) or self._basic_parser._lexer.current_lexeme.value != b'trailer':\n self._basic_parser._raise_syntax_error(\"Expecting 'trailer' section after 'xref' table.\")\n next(self._basic_parser._lexer)\n trailer = self._basic_parser.parse_object()\n return trailer, (inuse_objects, free_objects)\n \n\n def _stream_reader(self, D : 'dict', reader, obj_num : 'tuple' = None):\n file_path = D.get(\"F\")\n if file_path is not None:\n raise PDFUnsupportedError(\"\"\"\n Support for streams having data specified in an external file are not supported yet.\n Please consider sending to the developers the PDF that generated this exception so that\n they can work on supporting this feature. 
\n \"\"\")\n \n # it is a stream object, lets find out its length\n length = D.get(\"Length\")\n if length is None:\n self._basic_parser._raise_syntax_error(\"Stream dictionary lacks of 'Length' entry.\")\n \n if isinstance(length, PDFReference):\n key = (length.object_number, length.generation_number)\n try:\n xrefentity = self.xreftable[key]\n except KeyError:\n logging.warning(\"Reference to non-existing object.\")\n # TODO: now what?\n self._basic_parser._raise_syntax_error(\"Missing stream 'Length' property.\")\n length = self.parse_reference(xrefentity)\n\n if not isinstance(length, int):\n self._basic_parser._raise_syntax_error(\"The object referenced by 'Length' is not an integer.\")\n\n def complete_reader():\n data = reader(length)\n # TODO: improve this\n if isinstance(data, memoryview):\n data = bytes(data)\n if D.get('Type') != 'XRef' and self._security_handler is not None:\n try:\n data = self._security_handler.decrypt_stream(data, D, obj_num)\n except Exception as e:\n self._basic_parser._raise_syntax_error(\"Error while decrypting data: \" + str(e))\n try:\n return decode(D, data)\n except Exception as e:\n self._basic_parser._raise_syntax_error(\"Error while decoding data: \" + str(e))\n \n return length, complete_reader\n \n","repo_name":"dipietrantonio/pdf4py","sub_path":"pdf4py/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":30582,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"81"} +{"seq_id":"40275666631","text":"# pratice-python exercice #2 \n# Ask the user for a number. Depending on whether the number is even or odd, \n# print out an appropriate message to the user. More more\n\ndef convert_to_number(number) :\n\ttry :\n\t\tnombre = int(number)\n\texcept ValueError:\n\t\tprint(\"Input is not a valid number. 
Try again\")\n\t\tnombre = \"Not a number\"\n\treturn nombre\n\t\nis_done = False\t\nwhile not is_done : \n\tinput_string = input(\"Enter a number (q to exit) : \")\n\tif input_string == \"q\" :\n\t\tis_done = True\n\t\tbreak\n\tnombre = convert_to_number(input_string)\n\tif nombre != \"Not a number\" :\n\t\tif nombre%2 == 0 :\n\t \t\tparite = \"pair\"\n\t\telse :\n\t \t\tparite = \"impair\" \n\t\tprint(\"Le nombre \" + repr(nombre) + \" est \" + parite + \".\")\n\nprint(\"All done!\")\nexit() \n\n","repo_name":"beaulieujm/python-exercices","sub_path":"Exercice_02.py","file_name":"Exercice_02.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74013681864","text":"\"\"\"Takes the difference of attributes between xr instances.\n\nThis is useful for comparing two pieces of data, or working on \nimplementing a data loading plugin.\n\"\"\"\nfrom pprint import pprint\n\nimport numpy as np\nimport pandas as pd\n\nfrom arpes.typing import DataType\n\n__all__ = (\"diff_attrs\",)\n\n\ndef diff_attrs(a: DataType, b: DataType, should_print=True, skip_nan=False, skip_composite=True):\n \"\"\"Returns the dictionary difference of the attributes between two xr instances.\"\"\"\n attrs_a = a.attrs\n attrs_b = b.attrs\n\n a_has = {k: v for k, v in attrs_a.items() if k not in attrs_b}\n b_has = {k: v for k, v in attrs_b.items() if k not in attrs_a}\n\n def should_skip(k):\n if skip_composite:\n composites = (\n dict,\n list,\n np.ndarray,\n pd.DataFrame,\n )\n if isinstance(attrs_a[k], composites) or isinstance(attrs_b[k], composites):\n if type(attrs_a[k]) == type(attrs_b[k]):\n return True\n\n try:\n if attrs_a[k] == attrs_b[k]:\n return True\n except ValueError:\n # probably a data frame\n return True\n\n if skip_nan and (np.isnan(attrs_a[k]) or np.isnan(attrs_b[k])):\n return True\n\n try:\n if np.isnan(attrs_a[k]) and np.isnan(attrs_b[k]):\n return True\n except:\n pass\n\n return False\n\n common = list(k for k in attrs_a.keys() if k in attrs_b and not should_skip(k))\n\n values_in_a = [attrs_a[k] for k in common]\n values_in_b = [attrs_b[k] for k in common]\n\n diff = pd.DataFrame(\n data={\n \"key\": common,\n \"A\": values_in_a,\n \"B\": values_in_b,\n }\n ).set_index(\"key\")\n\n if should_print:\n print(\"A has:\")\n pprint(a_has)\n\n print(\"\\nB has:\")\n pprint(b_has)\n\n print(\"\\nDifferences:\")\n print(diff.to_string())\n else:\n return a_has, b_has, diff\n","repo_name":"chstan/arpes","sub_path":"arpes/utilities/attrs.py","file_name":"attrs.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"40278722650","text":"from interactions import Extension, Client, SlashContext, OptionType, User, Embed, slash_command, slash_option, \\\r\n EmbedFooter\r\n\r\nfrom database.ticket import TicketDB\r\n\r\n\r\nclass UnClaim(Extension):\r\n def __init__(self, client: Client):\r\n self.bot: Client = client\r\n\r\n @slash_command(\r\n name=\"unclaim\",\r\n description=\"Removes the claim on the current ticket\"\r\n )\r\n async def un_claim(self, ctx: SlashContext):\r\n ...\r\n\r\n\r\ndef setup(client: Client):\r\n UnClaim(client)\r\n","repo_name":"Crspy2/Tickets","sub_path":"cogs/tickets/unclaim.py","file_name":"unclaim.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12682827545","text":"class 
InsufficientBalanceError(Exception):\n def __init__(self, balance, amount):\n self.balance = balance\n self.amount = amount\n\n def __str__(self):\n return f\"Insufficient Balance {self.balance} for a withdraw of {self.amount}\"\nclass Account:\n # class attribute\n minbal = 5000\n\n @staticmethod\n def getminbal():\n return Account.minbal\n\n # Constructor\n def __init__(self, acno, ahname, balance=0):\n # Object attributes\n self.acno = acno\n self.ahname = ahname\n self.__balance = balance # Private member\n\n def deposit(self, amount):\n if amount < 1:\n raise ValueError(\"Invalid amount for deposit\")\n\n self.__balance += amount\n\n def withdraw(self, amount):\n if amount < 1:\n raise ValueError(\"Invalid amount for withdraw\")\n\n if self.__balance - Account.minbal >= amount:\n self.__balance -= amount\n else:\n raise InsufficientBalanceError(self.__balance, amount)\n\n def getbalance(self):\n return self.__balance\n\n\na1 = Account(1, \"Jack\") # create an object\na1.deposit(10000)\na1.deposit(20000)\na1.withdraw(50000)\nprint(a1.getbalance())\nprint(a1.__dict__)\nprint(a1._Account__balance)\n\na2 = Account(10, \"Mark\", 50000)\n","repo_name":"srikanthpragada/python_02_feb_2023","sub_path":"demo/oop/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71202382344","text":"# coding=utf-8\nfrom common import config\n\nBROKER_URL = config.CELERY_REDIS_BROKER_URL\nCELERY_RESULT_BACKEND = config.CELERY_REDIS_BACKEND_URL\nCELERY_TASK_SERIALIZER = 'pickle'\nCELERY_RESULT_SERIALIZER = 'pickle'\nCELERY_ACCEPT_CONTENT = ['pickle', 'json']\nCELERY_TIMEZONE = 'Asia/Shanghai'\nCELERY_ENABLE_UTC = True\nCELERYD_CONCURRENCY = 3 # worker数量\nCELERYD_HIJACK_ROOT_LOGGER = False # 如果True则会移除所有的root logger下的handler。\n\nCELERY_ACKS_LATE = False\nCELERYD_PREFETCH_MULTIPLIER = 1 # 每一个worker服务的task数量\n\n# 定时任务\nCELERYBEAT_SCHEDULE = {\n # 'foli_push_everyday': {\n # 'task': 'mycelery.task.CeleryFoliPush.foli_auto_push',\n # 'schedule': crontab(hour=9, minute=0),\n # 'args': ()\n # },\n # 'huixiang_push_everymin': {\n # 'task': 'mycelery.task.CeleryHuixiangPush.huixiang_push',\n # 'schedule': crontab(minute='*/1'),\n # 'args': ()\n # },\n}\n\n# 队列\nCELERY_QUEUES = {\n \"quick_queue\": {\n \"exchange\": \"quick_queue\",\n \"binding_key\": \"quick_queue\"},\n \"slow_queue\": {\n \"exchange\": \"slow_queue\",\n \"binding_key\": \"slow_queue\",\n },\n}\n\n# 路由\nCELERY_ROUTES = {\n 'mycelery.tasks.send_sms_task.send_login_sms': {'queue': \"quick_queue\"},\n 'mycelery.tasks.send_sms_task.send_register_sms': {'queue': \"quick_queue\"},\n 'mycelery.tasks.send_sms_task.send_change_phone_sms': {'queue': \"quick_queue\"},\n 'mycelery.tasks.send_sms_task.send_forget_pass_sms': {'queue': \"quick_queue\"},\n}\n\n# 配置IMPORT,才能把task注册在celery中\nCELERY_IMPORTS = (\"mycelery.tasks.send_sms_task\",\n )\n\n\"\"\"\n通过 celery worker -A celery_task --loglevel=DEBUG可以看看注册的task:\n[tasks]\n . celery.backend_cleanup\n . celery.chain\n . celery.chord\n . celery.chord_unlock\n . celery.chunks\n . celery.group\n . celery.map\n . celery.starmap\n . mycelery.celery_task.custom_message_push\n . mycelery.celery_task.get_rongcloud_token\n . mycelery.task.CeleryFoliPush.foli_auto_push\n . mycelery.task.CeleryFoliPush.foli_manual_push\n . mycelery.task.CeleryHuixiangPush.huixiang_push\n . mycelery.task.CeleryManualPush.manual_push\n . mycelery.task.CeleryOrderFeedbackPush.order_feedback_push\n . 
mycelery.task.CelerySaveAppRequest.request_save_to_db\n . mycelery.task.CelerySaveThirdUserAvatar.save_thirduser_avatar\n . mycelery.task.ctool.CeleryQiniu.qiniu_delete\n . mycelery.task.ctool.CeleryQiniu.qiniu_upload_file\n . mycelery.task.ctool.CelerySmsClient.send_code_sms\n . mycelery.task.ctool.CelerySmsClient.send_feedback_sms\n\n\"\"\"\n","repo_name":"fatpo/doctor_server","sub_path":"mycelery/config/celery_config.py","file_name":"celery_config.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40026497317","text":"from link_tree_class import Link, Tree, BTree\n\n\ndef insert_everywhere(t, val):\n if not t.branches:\n return\n for b in t.branches:\n insert_everywhere(b, val)\n t.branches.append(Tree(val))\n\ndef tree_greater_than(t1, t2):\n count = 0\n if t1.label > t2.label:\n count += 1\n if t1.branches:\n for i in range(len(t1.branches)):\n count += tree_greater_than(t1.branches[i], t2.branches[i])\n return count\n\ndef flatten(t):\n if t == BTree.empty:\n return Link.empty\n\n if not t.left == BTree.empty:\n left = flatten(t.left)\n\n\ndef swap_pairs(lst):\n \"\"\"\n >>> a = Link(2, Link(1, Link(4, Link(3, Link(6, Link(5))))))\n >>> swap_pairs(a)\n >>> a\n Link(1, Link(2, Link(3, Link(4, Link(5, Link(6))))))\n \"\"\"\n if lst is not Link.empty:\n lst.rest.first, lst.first = lst.first, lst.rest.first\n swap_pairs(lst.rest.rest)\n\ndef double_double(lst):\n \"\"\"\n >>> a = Link(1, Link(2, Link(3)))\n >>> double_double(a)\n >>> a\n Link(2, Link(2, Link(4, Link(4, Link(6, Link(6))))))\n \"\"\"\n \"\"\"\n if lst is not Link.empty:\n lst.first *= 2\n lst.rest = Link(lst.first, lst.rest)\n double_double(lst.rest.rest)\n \"\"\"\n if lst != Link.empty:\n lst.first = lst.first * 2\n double_double(lst.rest)\n lst.rest = Link(lst.first, lst.rest)\n else:\n return\n\n\ndef pascal_row(s):\n\n if s is Link.empty:\n return Link(1)\n\n start = Link(1)\n last = start\n while s.rest is not Link.empty:\n last.rest = Link(s.first + s.rest.first)\n last = last.rest\n s = s.rest\n last.rest = Link(1)\n return start\n\ndef behavior(a):\n b = list(mystery(a))\n return [list(i) for i in b]\n\ndef mystery(a):\n \"\"\"\n >>> x = behavior(5)\n >>> x[0]\n [0, 0, 0, 0, 0]\n >>> x[1]\n [1, 0, 1, 0, 1]\n >>> x[2]\n [1, 2, 0, 1, 2]\n >>> y = behavior(2)\n >>> y[0]\n [0, 0]\n >>> y[1]\n [1, 0]\n >>> len(x)\n 5\n >>> len(y)\n 2\n \"\"\"\n def mystery2(b):\n for i in range(a) :\n yield (i+1) % b\n for j in range(a) :\n yield mystery2(j+1)\n\nclass TreeNode:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass BTreeIter:\n def __init__(self, in_tree):\n self.tree = in_tree\n self.gen = self.tree_gen()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.tree is None:\n raise StopIteration\n else:\n return next(self.gen)\n\n def tree_gen(self):\n yield from BTreeIter(self.tree.left)\n yield self.tree.val\n yield from BTreeIter(self.tree.right)\n\ndef matchmaker(m,w,H):\n if len(m) == 1:\n return H(m[0],w[0])\n else:\n firstman, rest = m[0], m[1:]\n allmatches = [H(firstman, woman) + matchmaker(rest,[ww for ww in w if ww is not woman] ) for woman in w]\n return max(allmatches)\n\ndef sorted_iter(sorted_lists):\n\twhile sorted_lists:\n\t\tsmallest = min(sorted_lists, key = lambda x: x[0])\n\t\tyield smallest.pop(0)\n\t\tsorted_lists = [lst for lst in sorted_lists if lst]\n\ndef nest_iter(nested_list):\n for i in nested_list:\n if not 
isinstance(i, list):\n yield i\n else:\n yield from nest_iter(i)\n\ndef nth_layer(t,d):\n if d == 1:\n yield t.label\n return # PEP 479: raising StopIteration inside a generator is a RuntimeError\n elif t.is_leaf():\n return\n else:\n for i in t.branches:\n yield from nth_layer(i, d-1)\n\ndef link_iter(lnk):\n if lnk is Link.empty:\n return\n elif not isinstance(lnk.first, Link):\n yield lnk.first\n else:\n for i in link_iter(lnk.first):\n yield i\n for j in link_iter(lnk.rest):\n yield j\n\ndef sandwich_iter(lst):\n while len(lst) >= 3:\n if lst[0] == lst[2]:\n yield lst[1]\n lst = lst[1:]\n","repo_name":"pianus/cs61a_self_study","sub_path":"lecture/final_review.py","file_name":"final_review.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3235512659","text":"from ev3dev.auto import *\n\nSound.speak('Welcome').wait()\n\nmotor = LargeMotor('outA')\nhand = MediumMotor('outB')\nmotor.run_timed(time_sp=3000, duty_cycle_sp=75)\nmotor.run_forever()\nmotor.stop()\n\nir = InfraredSensor()\nir.value()\nts = TouchSensor()\nts.value()\n\n\n\n","repo_name":"toussi/ev3python","sub_path":"class1.py","file_name":"class1.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74668065223","text":"from collections import deque\r\nN,M = map(int,input().split())\r\nboard = [list(map(int,input().split())) for _ in range(N)]\r\n\r\ndx = [-1,1,0,0]\r\ndy = [0,0,-1,1]\r\n\r\ndef bfs(x,y,visited):\r\n q = deque()\r\n q.append((x,y))\r\n visited[x][y] = 1\r\n rainbow_cnt = 0\r\n rainbow = []\r\n blocks = []\r\n color = board[x][y]\r\n cnt = 1\r\n blocks.append((x,y))\r\n while q:\r\n x,y = q.popleft()\r\n for i in range(4):\r\n nx = x + dx[i]\r\n ny = y + dy[i]\r\n if 0 <= nx < N and 0 <= ny < N:\r\n if board[nx][ny] != -1 and visited[nx][ny] == 0:\r\n if board[nx][ny] == color or board[nx][ny] == 0:\r\n visited[nx][ny] = 1\r\n blocks.append((nx,ny))\r\n cnt+=1\r\n q.append((nx,ny))\r\n if board[nx][ny] == 0:\r\n rainbow_cnt+=1\r\n rainbow.append((nx,ny))\r\n\r\n for x,y in rainbow:\r\n visited[x][y] = 0\r\n\r\n return (cnt,rainbow_cnt,blocks)\r\n\r\n# Find the largest block group and remove it\r\ndef big_group():\r\n visited = [[0 for _ in range(N)] for _ in range(N)]\r\n tmp_lst = []\r\n for i in range(N):\r\n for j in range(N):\r\n if board[i][j] > 0 and visited[i][j] == 0:\r\n tmp = bfs(i,j,visited)\r\n if tmp[0] >= 2:\r\n tmp_lst.append(tmp)\r\n tmp_lst.sort(key=lambda x: (x[0],x[1],x[2]),reverse=True)\r\n\r\n return tmp_lst\r\n\r\n# Compute the score after removal\r\ndef cal_score(lst):\r\n\r\n for x,y in lst[2]:\r\n board[x][y] = -2\r\n\r\n global score\r\n score += lst[0]**2\r\n return score\r\n\r\ndef gravity(board):\r\n for i in range(N-2,-1,-1):\r\n for j in range(N):\r\n if board[i][j] > -1:\r\n r = i\r\n while True:\r\n if 0 <= r+1 < N and board[r+1][j] == -2:\r\n board[r+1][j] = board[r][j]\r\n board[r][j] = -2\r\n r+=1\r\n else:\r\n break\r\n return board\r\n\r\ndef rotate():\r\n global board\r\n lst = []\r\n for i in range(N-1,-1,-1):\r\n tmp_lst = []\r\n for j in range(N):\r\n tmp_lst.append(board[j][i])\r\n lst.append(tmp_lst)\r\n\r\n board = lst\r\n return board\r\n\r\nscore = 0\r\nwhile True:\r\n tmp_lst = big_group()\r\n if len(tmp_lst) == 0:\r\n break\r\n score = cal_score(tmp_lst[0])\r\n board = gravity(board)\r\n board = rotate()\r\n board = gravity(board)\r\n\r\nprint(score)","repo_name":"Jeongseunghun/python_algorithm","sub_path":"백준/Gold/21609. 
상어 중학교/상어 중학교.py","file_name":"상어 중학교.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5039114534","text":"import logging\nfrom obspy.geodetics import gps2dist_azimuth, locations2degrees\nfrom obspy.taup import TauPyModel\n\nlogger = logging.getLogger(__name__.split('.')[-1])\nmodel = TauPyModel(model='ak135')\n\n\ndef get_arrivals(trace_lat, trace_lon, ev_lat, ev_lon, ev_depth):\n dist_deg = locations2degrees(trace_lat, trace_lon, ev_lat, ev_lon)\n distance, _, _ = gps2dist_azimuth(\n trace_lat, trace_lon, ev_lat, ev_lon)\n distance /= 1e3\n P_arrivals = model.get_travel_times(\n source_depth_in_km=ev_depth,\n distance_in_degree=dist_deg,\n phase_list=['p', 'P'])\n S_arrivals = model.get_travel_times(\n source_depth_in_km=ev_depth,\n distance_in_degree=dist_deg,\n phase_list=['s', 'S'])\n return P_arrivals[0], S_arrivals[0], distance, dist_deg\n","repo_name":"SeismicSource/requake","sub_path":"requake/arrivals.py","file_name":"arrivals.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"72849683466","text":"import sys\nfrom collections import Counter\n\n# Originally meant to use .count(), but it timed out, so Counter is used instead\n\nn = int(input())\narr_n = list(map(int, sys.stdin.readline().split()))\n\nm = int(input())\narr_m = list(map(int, sys.stdin.readline().split()))\n\ndic = dict(Counter(arr_n))\nfor i in arr_m :\n print(dic.get(i,0), end =\" \")","repo_name":"KymCat/CodingTestStudy","sub_path":"문제/실버/S10816.py","file_name":"S10816.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29850445375","text":"ref = list(map(int,input().split()))\na = ref[0]\nb = ref[1]\ngcd = 0\ni = 1\nwhile i<=a and i= pivot:\n right -= 1\n nums[left] = nums[right]\n while left < right and nums[left] <= pivot:\n left += 1\n nums[right] = nums[left]\n nums[left] = pivot\n return left\n\n# define a swap method\ndef swap(nums, i, j):\n nums[i], nums[j] = nums[j], nums[i]\n\nif __name__ == '__main__':\n nums = [2, 5, 1, 7, 3, 9, 4, 6, 8]\n print(quick_sort(nums, 0, len(nums) - 1))\n","repo_name":"guddbye/data-structures-and-algorithms","sub_path":"sorting/quick/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6960086745","text":"# sort the string using Insertion sort\n\n\ndef insertion_sort(string_array):\n try:\n for x in range(0, len(string_array)):\n key = string_array[x]\n j = x - 1\n while j >= 0 and key < string_array[j]:\n string_array[j + 1] = string_array[j]\n j = j - 1\n string_array[j + 1] = key\n except KeyError:\n print(\"key error arises \")\n\n\nif __name__ == \"__main__\":\n try:\n num = int(input(\"enter the number : \"))\n str_arr = []\n print(\"enter string to sort it : \")\n for i in range(num):\n str_arr.append(input())\n insertion_sort(str_arr)\n print('Sorted array of strings :')\n print(str_arr)\n except ValueError:\n print(\"please enter valid inputs\")\n","repo_name":"Sudha-Sahu/AlgorithmProgramsInPython","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1885764809","text":"'''\n\tExample Deployment 
for the Data Science (DS) Domain\n\tOrbit Future Academy - AI Mastery - KM Batch 3\n\tDeployment Team\n\t2022\n'''\n\n# =[Modules and Packages]========================\n\nfrom flask import Flask,render_template,request,jsonify\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sklearn.tree import DecisionTreeClassifier\nfrom joblib import load\n\n# =[Global Variables]============================\n\napp = Flask(__name__, static_url_path='/static')\n# model = None\nmodel = load('model_iris_knn.model')\n\n# =[Routing]=====================================\n\n# [Routing for the Main or Home Page]\t\n@app.route(\"/\")\ndef beranda():\n return render_template('index.html')\n\n# [Routing for the API]\t\n@app.route(\"/api/deteksi\",methods=['POST'])\ndef apiDeteksi():\n\t# Default values for the input variables or features (X) fed to the model\n\tinput_sepal_length = 5.1\n\tinput_sepal_width = 3.5\n\tinput_petal_length = 1.4\n\tinput_petal_width = 0.2\n\t\n\tif request.method=='POST':\n\t\t# Set the input variables or features (X) based on the user's input\n\t\tinput_sepal_length = float(request.form['sepal_length'])\n\t\tinput_sepal_width = float(request.form['sepal_width'])\n\t\tinput_petal_length = float(request.form['petal_length'])\n\t\tinput_petal_width = float(request.form['petal_width'])\n\t\t\n\t\t# Predict the iris class or species from the measurements provided by the user\n\t\tdf_test = pd.DataFrame(data={\n\t\t\t\"SepalLengthCm\" : [input_sepal_length],\n\t\t\t\"SepalWidthCm\" : [input_sepal_width],\n\t\t\t\"PetalLengthCm\" : [input_petal_length],\n\t\t\t\"PetalWidthCm\" : [input_petal_width]\n\t\t})\n\n\t\twith open ('ss_scaler.pkl', 'rb') as scaler_load:\n\t\t\tscaler = pickle.load(scaler_load)\n\t\t\n\t\tpredict_data = scaler.transform(df_test)\n\t\tprediksi = model.predict(predict_data)[0]\n\n\t\t# Set the image path for the prediction result\n\n\t\t# Iris setosa\n\t\thasil_prediksi = 'None'\n\t\tif prediksi == 0:\n\t\t\tgambar_prediksi = '/static/images/iris_setosa.jpg'\n\t\t\thasil_prediksi = 'Iris Setosa'\n\t\t\n\t\t# Iris Versicolor (the original compared hasil_prediksi here, which is never 1 -- a bug)\n\t\telif prediksi == 1:\t\t\t\n\t\t\tgambar_prediksi = '/static/images/iris_versicolor.jpg'\n\t\t\thasil_prediksi = 'Iris-versicolor'\n\t\t\n\t\t# Iris iris_virginica\n\t\telse:\n\t\t\tgambar_prediksi = '/static/images/iris_virginica.jpg'\n\t\t\thasil_prediksi = 'Iris Virginica'\n\t\t\n\t\t# Return the prediction result as JSON\n\t\treturn jsonify({\n\t\t\t\"prediksi\": hasil_prediksi,\n\t\t\t\"gambar_prediksi\" : gambar_prediksi\n\t\t})\n\n# =[Main]========================================\n\nif __name__ == '__main__':\n\t\n\t# Load the trained model\n\t# model = load('model_iris_dt.model')\n\n\t# Run Flask on localhost \n\tapp.run(host=\"localhost\", port=5000, debug=True)\n\t","repo_name":"haripurnomosidik/deploy_insight5","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18909873656","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#(c) 2022 Will Smith - WILL@WIFI-GUYS.COM\n\n\"\"\"Connect to Airwave REST API to get AP BSSID, Create PDF then Send as Email Attachment\"\"\"\n\n# Debian packages: python3-requests, python3-lxml\nimport xml.etree.ElementTree as ET # libxml2 and libxslt\nimport requests # HTTP requests\nfrom fpdf import FPDF # Create PDF\nimport pandas as pd # Create CSV\nimport urllib3 # Suppress SSL Errors\nimport smtplib, ssl # Send 
Email\n\nfrom email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n\n# ---------------------------------------------------------------------------\n# Constants\n# ---------------------------------------------------------------------------\n\n# Set csv or pdf\nlist_type = 'pdf'\n\n# Email Parameters\ngmail_user = 'eamil@gmail.com'\ngmail_password = 'gmail_pw'\nreceiver_email_address = \"email@email.com\"\n\n# Login/password for Airwave (read-only account)\nLOGIN = 'airwave_admin'\nPASSWD = 'airwave_PW!'\n\n# URL for REST API\nLOGIN_URL = 'https://airwave.your-company.com/LOGIN'\nAP_BSSID_URL = 'https://airwave.your-company.com/api/ap_bssid_list.xml'\n\n\n# HTTP headers for each HTTP request\nHEADERS = {\n 'Content-Type' : 'application/x-www-form-urlencoded',\n 'Cache-Control' : 'no-cache'\n}\n\n# Misc\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) #Disable SSL Warnings\n\n# ---------------------------------------------------------------------------\n# Functions\n# ---------------------------------------------------------------------------\n\n# AirWave Authentication\ndef open_session():\n \"\"\"Open HTTPS session with login\"\"\"\n\n ampsession = requests.Session()\n data = 'credential_0={0}&credential_1={1}&destination=/&login=Log In'.format(LOGIN, PASSWD)\n loginamp = ampsession.post(LOGIN_URL, headers=HEADERS, data=data, verify=False)\n return {'session' : ampsession, 'login' : loginamp}\n\n# Gather AP BSSID Info\ndef get_ap_bssid(session):\n \"\"\"Get XML data and returns a dictionnaries list\"\"\"\n output = session.get(AP_BSSID_URL, headers=HEADERS, verify=False)\n ap_bssid_output = output.content\n # Parse XML for desired attributes and build a dictionnaries list\n xml_data = ET.fromstring(ap_bssid_output)\n aps = xml_data.findall(\"ap\")\n for ap in aps:\n for radio in ap.findall(\"radio\"):\n bunch_of_bssids = [b.attrib.get(\"mac\", \"ERROR\") for b in radio.findall(\"bssid\")]\n bssd_str = ','.join(bunch_of_bssids)\n bssid_data = ((f\"{ap.attrib['name']},{bssd_str}\"))\n with open('bssid.txt', 'a') as f:\n f.write(bssid_data + '\\n')\n\n# Create PDF from Dict\nclass PDF(FPDF):\n def header(self):\n# self.image('aruba_logo.png', 170, 275, 33) #bottom right\n self.image('aruba_logo.png', 170, 10, 33) #top right\n self.set_font('Arial', 'B', 15)\n self.cell(0, 10, 'Aruba AP BSSID List', 0, 1, 'C')\n\ndef create_pdf():\n with open('bssid.txt', 'r') as data:\n plaintext = data.read()\n plaintext = plaintext.replace(',', ' ')\n pdf = PDF()\n pdf.add_page()\n pdf.set_font(\"Arial\", size = 12)\n pdf.multi_cell(200, 8, txt=plaintext, align = 'L')\n pdf.output(\"bssid.pdf\")\n\n# Create CSV Files\ndef create_csv():\n df = pd.read_csv('bssid.txt',sep=';')\n df.to_csv('bssid.csv', index=None)\n# print(df.to_csv(index=None))\n\n# Removes data from temp file\ndef cleanup():\n open(\"bssid.txt\", \"w\").close()\n\n# Send Email (Gmail)\ndef send_email():\n try:\n subject = \"Aruba BSSID List\"\n body = \"Please see attachment.\"\n sender_email = gmail_user \n receiver_email = receiver_email_address\n password = gmail_password\n # Create a multipart message and set headers\n message = MIMEMultipart()\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n message[\"Subject\"] = subject\n message[\"Bcc\"] = receiver_email # Send copy to self\n\n # Add body to email\n message.attach(MIMEText(body, \"plain\"))\n if list_type == \"pdf\":\n filename = \"bssid.pdf\"\n 
else:\n filename = \"bssid.csv\"\n\n # Open PDF file in binary mode\n with open(filename, \"rb\") as attachment:\n # Add file as application/octet-stream\n # Email client can usually download this automatically as attachment\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(attachment.read())\n # Encode file in ASCII characters to send by email \n encoders.encode_base64(part)\n\n # Add header as key/value pair to attachment part\n part.add_header(\n \"Content-Disposition\",\n f\"attachment; filename= {filename}\",\n )\n\n # Add attachment to message and convert message to string\n message.attach(part)\n text = message.as_string()\n\n # Log in to server using secure context and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, text)\n # print(\"Email Sent\")\n except:\n print(\"Problem Sending Email\")\n\n# Mission Control\ndef main():\n session = open_session()\n get_ap_bssid(session['session'])\n if list_type == \"pdf\":\n create_pdf()\n else: \n create_csv()\n send_email()\n cleanup()\n\nmain()\n","repo_name":"WifiGuyWill/AirWave-BSSID-Report","sub_path":"bssid.py","file_name":"bssid.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"476549043","text":"import boto3\nimport paramiko\n\nec2 = boto3.resource('ec2')\ntarget_id = \"i-0019cfe9227988d5e\"\n\n# Create the key pair\n# create a file to store the key locally\n# outfile = open('ec2-keypair.pem', 'w')\n#\n# # call the boto ec2 function to create a key pair\n# key_pair = ec2.create_key_pair(KeyName='ec2-keypair')\n#\n# # capture the key and store it in a file\n# KeyPairOut = str(key_pair.key_material)\n# print(KeyPairOut)\n# outfile.write(KeyPairOut)\n\n\n# instances = ec2.create_instances(\n# ImageId='ami-06b263d6ceff0b3dd',\n# MinCount=1,\n# MaxCount=1,\n# InstanceType='t2.micro',\n# KeyName='ec2-keypair'\n# )\n#\n\ninstances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\nfor instance in instances:\n print(instance.id, instance.instance_type)\n if instance.id == target_id:\n\n key = paramiko.RSAKey.from_private_key_file('ec2-keypair.pem')\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n # Connect/ssh to an instance\n try:\n # Here 'ubuntu' is user name and 'instance_ip' is public IP of EC2\n client.connect(hostname=instance.public_ip_address, username=\"ubuntu\", pkey=key)\n\n # Execute a command(cmd) after connecting/ssh to an instance\n client.exec_command(\"touch /tmp/1\")\n stdin, stdout, stderr = client.exec_command(\"ls /tmp\")\n print(stdout.read().decode('utf-8'))\n\n # close the client connection once the job is done\n client.close()\n break\n\n except Exception as e:\n print(e)\n\n","repo_name":"CT83/Dask-UI","sub_path":"aws_manager/ssher.py","file_name":"ssher.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"824457680","text":"file = open(\"./input.txt\")\r\n#file = [\"00100\", \"11110\", \"10110\", \"10111\", \"10101\", \"01111\", \"00111\", \"11100\", \"10000\", \"11001\", \"00010\", \"01010\"]\r\n\r\nnumbers = [i.strip() for i in file]\r\n\r\noxygen, co2 = \"\", \"\"\r\n\r\ndef bin_to_dec(x): # str x\r\n s = 0\r\n n = 0\r\n for i in 
x[::-1]:\r\n s += 2 ** n * int(i)\r\n n += 1\r\n return s\r\n\r\nco2_viable, oxygen_viable = numbers, numbers\r\n\r\nfor i in range(len(numbers[0])):\r\n ones, zeros = 0, 0\r\n has_0, has_1 = [], []\r\n for n in co2_viable:\r\n if n[i] == \"0\":\r\n zeros += 1\r\n has_0.append(n)\r\n else:\r\n ones += 1\r\n has_1.append(n)\r\n if zeros > ones:\r\n co2_viable = has_1\r\n else:\r\n co2_viable = has_0\r\n if len(co2_viable) == 1:\r\n break\r\n\r\nfor i in range(len(numbers[0])):\r\n ones, zeros = 0, 0\r\n has_0, has_1 = [], []\r\n for n in oxygen_viable:\r\n if n[i] == \"0\":\r\n zeros += 1\r\n has_0.append(n)\r\n else:\r\n ones += 1\r\n has_1.append(n)\r\n if zeros > ones:\r\n oxygen_viable = has_0\r\n else:\r\n oxygen_viable = has_1\r\n if len(oxygen_viable) == 1:\r\n break\r\n\r\nprint(bin_to_dec(co2_viable[0]) * bin_to_dec(oxygen_viable[0]))\r\n","repo_name":"The-EmptyName/Advent-of-Code-2021","sub_path":"Day 3/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5994494939","text":"import io\nimport os\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"PennApps 2018-c1bab6b19fa1.json\"\n\nfrom google.cloud import vision\n\ndef detect_logos(path):\n\tclient = vision.ImageAnnotatorClient()\n\n\twith io.open(path, 'rb') as image_file:\n\t\tcontent = image_file.read()\n\n\timage = vision.types.Image(content=content)\n\n\tresponse = client.logo_detection(image=image)\n\tlogos = response.logo_annotations\n\tprint('Logos:')\n\tfor logo in logos:\n\t\tprint(logo.description)","repo_name":"0xJeremy/Image-To-Speech","sub_path":"google_test_scripts/google_logo_detector.py","file_name":"google_logo_detector.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"34294551248","text":"#!/usr/bin/python3.4\n# -*-coding:Utf-8 -*\n\n# by A. 
Tonnoir\n\nimport numpy as np\nfrom scipy import linalg\nimport copy\n\nimport matplotlib.pyplot as plt\n#import maillage as mesh\n\n\n#########################################################\n# BC :\nVmesSmall = np.genfromtxt('ComparaisonVNC/VmesExpIELarge.data', delimiter=\" \")\nVmesLarge = np.genfromtxt('ComparaisonVNC/VmesExpMBCLarge.data', delimiter=\" \")\n\nErrV = 100*np.abs((VmesLarge-VmesSmall)/VmesLarge)\n(nl,nc) = ErrV.shape\nerrInf = 0\nerrL2 = 0\nfor i in range(nl):\n for j in range(nc):\n if(i != j):\n errL2 += ErrV[i,j]**2\n if(errInf < ErrV[i,j]):\n errInf = ErrV[i,j]\n\nprint('Err Inf : ',errInf)\nprint('Err L2 : ',np.sqrt(errL2/(64*63.)))\n\nplt.show()\nc = plt.pcolor(100*np.abs((VmesLarge-VmesSmall)/VmesLarge),cmap='jet',vmin=0.0,vmax=100)\nplt.colorbar(c)\n#plt.savefig('DiffSmallLargeMBC.pdf')\nplt.show()\n\n\n#\n#\n# Vmes = np.genfromtxt('VmesExpIE.data',delimiter=\" \")\n# VmesEx = np.genfromtxt('VmesExa.data',delimiter=\" \")\n#\n# ErrV = np.abs(Vmes-VmesEx)/VmesEx\n# (nl,nc) = ErrV.shape\n#\n# errInf = 0\n# errL2 = 0\n# for i in range(nl):\n# for j in range(nc):\n# if(i != j):\n# errL2 += ErrV[i,j]**2\n# if(errInf < ErrV[i,j]):\n# errInf = ErrV[i,j]\n#\n# print('Err Inf : ',errInf)\n# print('Err L2 : ',np.sqrt(errL2/(64*63.))) \n\n\n# Rhoa = Vmes / VmesEx\n#\n# ll = []\n# for i in range(32):\n# print(i,64-i,Rhoa[i,64-i])\n# ll.append(Rhoa[i,64-i])\n#\n# ll.reverse()\n# plt.plot(ll,'x-')\n# plt.show()\n#########################################################\n\n\n\n\n\nprint(\"End of program\")\n","repo_name":"atonnoir/PyLGRIM","sub_path":"2_SIMU_ERI/compVmes.py","file_name":"compVmes.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9145087493","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction import text\nfrom scipy.spatial import distance\nfrom sklearn.metrics import accuracy_score\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport re\nimport os\nimport string\nfrom sklearn import svm\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import confusion_matrix\n\nimport nltk\nnltk.download('punkt')\n\n\nclass AuthorClassifier:\n \n train_path=\"\"\n test_path=\"\"\n \n \n \n def fun(self):\n pass\n\n def predict(self,s):\n self.test_path=str(s)\n return self.my_fun()\n \n def train(self,s):\n self.train_path=str(s) \n\n def my_fun(self):\n \n\n ds1=pd.read_csv(self.train_path)\n train_labels=ds1.iloc[:,2]\n \n train_row=ds1.shape[0]\n \n ds2=pd.read_csv(self.test_path)\n test_row=ds2.shape[0]\n\n \n corpus1=ds1.iloc[:,1].to_numpy()\n corpus2=ds2.iloc[:,1].to_numpy() \n \n corpus=np.concatenate([corpus1,corpus2],axis=0)\n \n# print(corpus.shape)\n \n for i in range(len(corpus)):\n \n regex = re.compile('[^a-zA-Z]')\n corpus[i]=regex.sub(' ', corpus[i])\n txt = ''.join(corpus[i])\n corpus[i]=txt\n\n \n \n for i in range(len(corpus)):\n \n txt1=corpus[i].split(' ')\n txt=\"\"\n \n for j in txt1:\n if(len(j)>3):\n txt+=\" \"+j\n corpus[i]=txt\n \n \n # clf = svm.SVC(kernel='linear',C=1)\n# clf.fit(train_data, train_labels)\n# prediction=clf.predict(test_data)\n# return prediction\n\n \n \n \n for i in range(len(corpus)):\n \n stemmer= PorterStemmer()\n \n txt1=word_tokenize(corpus[i])\n txt=\"\"\n for word in txt1:\n txt+=\" \"+stemmer.stem(word)\n 
\n corpus[i]=txt\n corpus\n \n \n \n my_stop_words = text.ENGLISH_STOP_WORDS\n\n \n \n vectorizer = TfidfVectorizer(stop_words=my_stop_words)\n X = vectorizer.fit_transform(corpus)\n X=X.toarray()\n\n# print(X,X.shape)\n \n \n \n #from sklearn.decomposition import PCA\n #pca = PCA(n_components=1000)\n #X=pca.fit_transform(X)\n \n \n train_data=X[:train_row]\n test_data=X[train_row:]\n \n# \n# \n# print(\"Train data shape:- \",train_data.shape)\n# print(\"Train labels shape:- \",test_data.shape)\n# \n\n clf = svm.SVC(kernel='linear',C=1)\n clf.fit(train_data, train_labels)\n prediction=clf.predict(test_data)\n return prediction\n\n\n\n\n\n\n\n#from q5 import AuthorClassifier as ac\nauth_classifier = AuthorClassifier()\nauth_classifier.train('Datasets/Question-5/Train(1).csv') # Path to the train.csv will be provided\npredictions = auth_classifier.predict('Datasets/Question-5/Train(1).csv') # Path to the test.csv will be provided\n\n","repo_name":"shrayans/Machine_Learning_Models","sub_path":"Support Vector Machine/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73969625224","text":"import logging\r\nimport subprocess\r\nimport traceback\r\nimport sys\r\n\r\nimport aiohttp\r\nimport asyncio\r\nimport pkg_resources\r\n\r\n# Create a logger instance\r\nlogger = logging.getLogger(\"scraping\")\r\nlogger.setLevel(logging.INFO)\r\n\r\n# Create a console handler\r\nconsole_handler = logging.StreamHandler(sys.stdout)\r\nconsole_handler.setLevel(logging.INFO)\r\nconsole_formatter = logging.Formatter(\"%(asctime)s [%(levelname)s] %(message)s\")\r\nconsole_handler.setFormatter(console_formatter)\r\nlogger.addHandler(console_handler)\r\n\r\n# Create a file handler\r\nfile_handler = logging.FileHandler(\"scraping.log\")\r\nfile_handler.setLevel(logging.INFO)\r\nfile_formatter = logging.Formatter(\"%(asctime)s [%(levelname)s] %(message)s\")\r\nfile_handler.setFormatter(file_formatter)\r\nlogger.addHandler(file_handler)\r\n\r\n# Upgrade pip\r\ntry:\r\n subprocess.check_call(['python', '-m', 'pip', 'install', '--upgrade', 'pip'])\r\nexcept subprocess.CalledProcessError as e:\r\n logger.error(\"Error occurred while upgrading pip: {}\".format(str(e)))\r\n exit(1)\r\n\r\n# Check for missing libraries\r\nmissing_libraries = []\r\nrequired_libraries = ['spacy', 'pytesseract', 'requests', 'bs4', 'tqdm', 'fake_useragent', 'opencv-python', 'python-Levenshtein', 'nltk', 'openpyxl', 'aiohttp', 'asyncio', 'pandas', 'openpyxl', 'fake_useragent']\r\n\r\nfor library in required_libraries:\r\n try:\r\n pkg_resources.get_distribution(library)\r\n except pkg_resources.DistributionNotFound:\r\n missing_libraries.append(library)\r\n\r\nif missing_libraries:\r\n logger.error(\"The following libraries are missing or not installed:\")\r\n for library in missing_libraries:\r\n logger.error(f\"- {library}\")\r\n\r\n response = input(\"\\nDo you want to install the missing libraries? 
(y/n): \")\r\n if response.lower() == 'y':\r\n for library in missing_libraries:\r\n try:\r\n subprocess.check_call(['pip', 'install', library])\r\n logger.info(f\"Library {library} has been installed.\")\r\n except subprocess.CalledProcessError as e:\r\n logger.error(f\"Error occurred while installing {library}.\")\r\n logger.error(str(e))\r\n else:\r\n logger.error(\"Please install the missing libraries before running the script.\")\r\n exit(1)\r\nelse:\r\n logger.info(\"All required libraries are installed.\")\r\n\r\n# Check for missing language modules\r\nmissing_modules = []\r\nrequired_modules = ['en_core_web_sm']\r\n\r\nfor module in required_modules:\r\n try:\r\n pkg_resources.get_distribution(module)\r\n except pkg_resources.DistributionNotFound:\r\n missing_modules.append(module)\r\n\r\nif missing_modules:\r\n logger.error(\"The following language modules are missing or not installed:\")\r\n for module in missing_modules:\r\n logger.error(f\"- {module}\")\r\n\r\n response = input(\"\\nDo you want to download and install the missing language modules? (y/n): \")\r\n if response.lower() == 'y':\r\n try:\r\n subprocess.check_call(['python', '-m', 'spacy', 'download'] + missing_modules)\r\n logger.info(\"Missing language modules have been downloaded and installed.\")\r\n except subprocess.CalledProcessError:\r\n logger.error(\"Failed to download and install language modules.\")\r\n logger.error(\"Please make sure 'spacy' is installed and try running the following command manually:\")\r\n logger.error(\"python -m spacy download en_core_web_sm\")\r\n exit(1)\r\n else:\r\n logger.error(\"Please install the missing language modules before running the script.\")\r\n exit(1)\r\nelse:\r\n logger.info(\"All required language modules are installed.\")\r\n\r\nfrom spacy.util import get_lang_class\r\ndef prompt_additional_language_modules():\r\n # Hardcoded list of supported language models\r\n supported_languages = ['en', 'de', 'fr', 'es', 'it', 'nl']\r\n installed_languages = []\r\n for lang in supported_languages:\r\n try:\r\n get_lang_class(lang)\r\n installed_languages.append(lang)\r\n except OSError:\r\n pass\r\n\r\n if not installed_languages:\r\n logger.info(\"No additional language models are currently installed.\")\r\n return\r\n\r\n logger.info(\"Installed language models:\")\r\n for lang in installed_languages:\r\n logger.info(lang)\r\n\r\n response = input(\"Do you want to download and install additional language modules? (y/n): \")\r\n if response.lower() == 'y':\r\n language_code_pattern = r'^[a-z]{2}$'\r\n language_modules = []\r\n\r\n while True:\r\n language_code = input(\"Enter the language code (2 characters): \")\r\n if re.match(language_code_pattern, language_code):\r\n if language_code in installed_languages:\r\n logger.info(\"Language model is already installed.\")\r\n else:\r\n language_modules.append(language_code)\r\n response = input(\"Do you want to add more language modules? (y/n): \")\r\n if response.lower() != 'y':\r\n break\r\n else:\r\n logger.error(\"Invalid language code. 
Please enter a valid 2-character language code.\")\r\n\r\n try:\r\n subprocess.check_call(['python', '-m', 'spacy', 'download'] + language_modules)\r\n logger.info(\"Additional language modules have been downloaded and installed.\")\r\n except subprocess.CalledProcessError:\r\n logger.error(\"Failed to download and install additional language modules.\")\r\n logger.error(\"Please make sure 'spacy' is installed and try running the following command manually:\")\r\n logger.error(\"python -m spacy download {}\".format(\" \".join(language_modules)))\r\n sys.exit(1)\r\n else:\r\n logger.info(\"No additional language modules will be installed.\")\r\n\r\n\r\n# Prompt user for levenshtein_threshold\r\nwhile True:\r\n try:\r\n logger.info(\"Levenshtein threshold: The Levenshtein threshold is used for measuring the difference between two strings. A lower threshold allows for more lenient matching. A suggested value is 3, but you can adjust it based on your requirements.\")\r\n levenshtein_threshold = int(input(\"Enter the Levenshtein threshold for name matching (suggested value: 3): \"))\r\n if levenshtein_threshold >= 0:\r\n break\r\n else:\r\n logger.error(\"Please enter a non-negative integer value.\")\r\n except ValueError:\r\n logger.error(\"Invalid input. Please enter a non-negative integer value.\")\r\n\r\n# Prompt user for distance_threshold\r\nwhile True:\r\n try:\r\n logger.info(\"Name threshold: The distance threshold is used for similarity matching between names and email addresses. It represents the minimum similarity required for a match. A higher threshold means stricter matching. A suggested value is 0.8, but you can adjust it based on your data and preferences.\")\r\n name_threshold = float(input(\"Enter the distance threshold for similarity matching (suggested value: 0.8): \"))\r\n if 0 <= name_threshold <= 1:\r\n break\r\n else:\r\n logger.error(\"Please enter a value between 0 and 1.\")\r\n except ValueError:\r\n logger.error(\"Invalid input. 
Please enter a numeric value.\")\r\nimport Levenshtein\r\nimport cv2\r\nimport numpy as np\r\nfrom nltk import word_tokenize, pos_tag\r\nimport spacy\r\n\r\ndef calculate_distance(name, email, threshold):\r\n email_prefix = email.split(\"@\")[0]\r\n distance = Levenshtein.distance(name.lower(), email_prefix.lower())\r\n return distance if distance <= threshold else float('inf')\r\n\r\n\r\ndef truncate_content(content, max_length):\r\n return content[:max_length] if len(content) > max_length else content\r\nimport fake_useragent\r\nasync def parse_subpages(content, base_url, target_netloc):\r\n try:\r\n processed_urls = set()\r\n user_agent = fake_useragent.UserAgent()\r\n\r\n async with aiohttp.ClientSession() as session:\r\n queue = asyncio.Queue()\r\n queue.put_nowait((base_url, content))\r\n\r\n while not queue.empty():\r\n current_url, current_content = await queue.get()\r\n\r\n if current_url in processed_urls:\r\n continue\r\n\r\n processed_urls.add(current_url)\r\n\r\n headers = {'User-Agent': user_agent.random}\r\n async with session.get(current_url, headers=headers) as response:\r\n status_code = response.status\r\n content_type = response.headers.get('Content-Type', '')\r\n content = await response.read()\r\n\r\n if status_code != 200:\r\n logging.error(f\"Failed to fetch URL: {current_url}\")\r\n continue\r\n\r\n if 'text/html' not in content_type:\r\n logging.info(\"Skipping URL: {}\".format(current_url))\r\n continue\r\n\r\n max_content_length = 49141\r\n if len(content) > max_content_length:\r\n logging.info(\"Truncating content for URL: {}\".format(current_url))\r\n content = content[:max_content_length]\r\n\r\n emails = extract_emails_from_content(content)\r\n logging.info(\"URL: {}\".format(current_url))\r\n logging.info(\"Emails: {}\".format(emails))\r\n\r\n names = [extract_names(content, email, name_threshold, levenshtein_threshold, truncated_content=content) for email in emails]\r\n logging.info(\"Names: {}\".format(names))\r\n\r\n yield current_url\r\n\r\n subpages = parse_subpages(content, current_url, target_netloc)\r\n async for subpage_url in subpages:\r\n yield subpage_url\r\n\r\n return\r\n\r\n except aiohttp.ClientError as e:\r\n logging.error(\"Error occurred while accessing URL: {}\\nError details: {}\".format(base_url, str(e)))\r\n return\r\n\r\n except Exception as e:\r\n logging.exception(f\"Unexpected error occurred while parsing subpages: {str(e)}\")\r\n return\r\n\r\n\r\nimport re\r\n\r\n\r\nemail_patterns = [\r\n # Standard email format\r\n r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b',\r\n\r\n # Obfuscated email patterns\r\n r'\\b[A-Za-z0-9._%+-]+\\[at\\][A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b',\r\n r'\\b[A-Za-z0-9._%+-]+\\s*\\*\\s*[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b',\r\n r'\\b[A-Za-z0-9]+\\s*\\[\\.\\]\\s*[A-Za-z0-9]+\\s*\\[\\.\\]\\s*[A-Za-z0-9]+\\b',\r\n r'\\b[A-Za-z0-9]+\\s*\\(\\s*[A-Za-z0-9]+\\s*\\)\\s*[A-Za-z0-9]+\\b',\r\n r'\\b[A-Za-z0-9]+\\s*(\\*|at|\\[at\\]|\\(at\\)|{at}|\\[.\\])\\s*[A-Za-z0-9]+\\b',\r\n\r\n # ASCII representation\r\n r'&#[0-9]+;',\r\n\r\n # JavaScript encoded emails\r\n r'unescape\\((\"[^\"]+\"|\\'[^\\']+\\')\\)',\r\n r'String\\.fromCharCode\\(([0-9]+,?)+\\)',\r\n\r\n # Image-based obfuscation\r\n r']+src=\"data:image\\/[^;]+;base64,[^\"]+\"[^>]*>',\r\n r']+src=\\'data:image\\/[^;]+;base64,[^\\']+\\'[^>]*>',\r\n\r\n # Hexadecimal representation\r\n r'[0-9a-fA-F]+',\r\n\r\n # Rot13 encoding\r\n r'[a-zA-Z](?:[^a-zA-Z]*[a-zA-Z]){3,}',\r\n]\r\n\r\nemail_pattern = re.compile('|'.join(email_patterns), re.IGNORECASE)\r\n\r\ndef 
extract_emails_from_content(content):\r\n if isinstance(content, bytes):\r\n content = content.decode('utf-8', errors='ignore')\r\n\r\n soup = BeautifulSoup(content, 'html.parser')\r\n\r\n emails = []\r\n for pattern in email_patterns:\r\n try:\r\n extracted_emails = re.findall(pattern, content)\r\n for email in extracted_emails:\r\n if isinstance(email, str) and is_valid_email(email):\r\n emails.append(email)\r\n except Exception as e:\r\n logging.error(f\"Error occurred while extracting emails with pattern {pattern}: {str(e)}\")\r\n\r\n email_tags = soup.select('a[href^=\"mailto:\"]')\r\n for email_tag in email_tags:\r\n try:\r\n email = email_tag.get('href').replace('mailto:', '')\r\n if isinstance(email, str) and is_valid_email(email):\r\n emails.append(email.replace('[at]', '@'))\r\n except Exception as e:\r\n logging.error(f\"Error occurred while extracting email from mailto tag: {str(e)}\")\r\n\r\n return emails\r\n\r\n\r\n\r\n\r\ndef load_language_module(module_name):\r\n try:\r\n nlp = spacy.load(module_name)\r\n return nlp\r\n except OSError as e:\r\n logging.error(f\"Failed to load language module '{module_name}': {str(e)}\")\r\n sys.exit(1)\r\nnlp = load_language_module('en_core_web_sm')\r\n\r\n\r\nimport re\r\nimport pytesseract\r\n\r\n\r\ndef extract_names(content, email, name_threshold, levenshtein_threshold, truncated_content=None, nlp=None):\r\n try:\r\n email_prefix = email.split(\"@\")[0]\r\n\r\n if truncated_content:\r\n if isinstance(truncated_content, bytes):\r\n truncated_content = truncated_content.decode('utf-8', errors='ignore')\r\n doc = nlp(truncated_content)\r\n else:\r\n doc = nlp(content, disable=[\"parser\"])\r\n\r\n names = []\r\n for entity in doc.ents:\r\n if entity.label_ == 'PERSON':\r\n name = entity.text.strip()\r\n if len(name) > name_threshold:\r\n names.append(name)\r\n\r\n if names:\r\n best_match = min(names, key=lambda name: calculate_distance(name, email_prefix, levenshtein_threshold))\r\n return best_match\r\n\r\n words = []\r\n pos_tags = []\r\n for sentence in doc.sents:\r\n sentence_words = word_tokenize(sentence.text)\r\n sentence_pos_tags = pos_tag(sentence_words)\r\n words.extend(sentence_words)\r\n pos_tags.extend(sentence_pos_tags)\r\n\r\n names_nnp = [word for word, pos in pos_tags if pos == 'NNP' and len(word) > name_threshold]\r\n if names_nnp:\r\n best_match = min(names_nnp, key=lambda name: calculate_distance(name, email_prefix, levenshtein_threshold))\r\n return best_match\r\n\r\n return 'N/A'\r\n except Exception as e:\r\n logging.error(\"Error occurred while extracting names: {}\".format(str(e)))\r\n return 'N/A'\r\n\r\n\r\n\r\ndef is_valid_email(email):\r\n email_pattern = r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b'\r\n try:\r\n if isinstance(email, str):\r\n return bool(re.fullmatch(email_pattern, email))\r\n else:\r\n raise ValueError(\"Invalid email format. 
Email must be a string.\")\r\n except Exception as e:\r\n logging.error(f\"Error occurred while validating email: {email}\\nError details: {str(e)}\")\r\n return False\r\n\r\n\r\n\r\ndef extract_email_from_image(image_data):\r\n try:\r\n nparr = np.frombuffer(image_data, np.uint8)\r\n img = cv2.imdecode(nparr, cv2.IMREAD_GRAYSCALE)\r\n\r\n ret, thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\r\n\r\n extracted_text = pytesseract.image_to_string(thresh, config='--psm 6')\r\n\r\n email_pattern = r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b'\r\n extracted_emails = re.findall(email_pattern, extracted_text)\r\n\r\n if extracted_emails:\r\n return extracted_emails[0]\r\n else:\r\n return ''\r\n except Exception as e:\r\n logging.error(\"Error occurred while extracting email from image: {}\".format(str(e)))\r\n return ''\r\nimport logging\r\nfrom urllib.parse import urlparse, urljoin\r\nfrom bs4 import BeautifulSoup\r\n\r\nasync def fetch(session, url, user_agent):\r\n headers = {'User-Agent': user_agent}\r\n try:\r\n async with session.get(url, headers=headers) as response:\r\n content_type = response.headers.get('Content-Type', '')\r\n content = await response.read()\r\n return response.status, content_type, content\r\n\r\n except aiohttp.ClientError as e:\r\n logging.error(f\"Error occurred while fetching URL: {url}\\nError details: {str(e)}\")\r\n raise\r\n\r\n except Exception as e:\r\n logging.exception(f\"Unexpected error occurred while fetching URL: {url}\")\r\n raise\r\n\r\n\r\nasync def scrape_url(session, url, name_threshold, user_agent, target_netlocs, retries=3):\r\n for attempt in range(retries):\r\n try:\r\n emails, names, subpages, base_url, content = await scrape_data(url, name_threshold, user_agent, target_netlocs)\r\n return emails, names, subpages, base_url, content\r\n\r\n except aiohttp.ClientError as e:\r\n logging.error(f\"Error occurred while accessing URL: {url}\\nError details: {str(e)}\")\r\n\r\n except Exception as e:\r\n logging.exception(f\"Unexpected error occurred while scraping URL: {url}\")\r\n raise\r\n\r\n if attempt < retries - 1:\r\n logging.info(f\"Retrying {url} (attempt {attempt + 2} of {retries})...\")\r\n await asyncio.sleep(1) # Add a small delay before retrying\r\n\r\n logging.error(f\"Failed to scrape {url} after {retries} attempts.\")\r\n return [], [], set(), '', ''\r\n\r\nasync def scrape_data(urls_to_scrape, name_threshold, user_agent, target_netlocs, max_threads):\r\n all_emails = []\r\n email_to_name = {}\r\n processed_urls = set(urls_to_scrape)\r\n\r\n async def process_url(session, url):\r\n try:\r\n emails, names, subpages, base_url, content = await scrape_url(session, url, name_threshold, user_agent, target_netlocs)\r\n all_emails.extend([(email, url) for email in emails])\r\n for email, name in zip(emails, names):\r\n if is_valid_email(email):\r\n email_to_name[email] = name\r\n\r\n return subpages\r\n\r\n except Exception as e:\r\n logging.exception(f\"Error occurred during data scraping for URL: {url}\")\r\n return []\r\n\r\n async def process_urls(urls_to_process):\r\n async with aiohttp.ClientSession() as session:\r\n tasks = [process_url(session, url) for url in urls_to_process]\r\n subpages_list = await asyncio.gather(*tasks, return_exceptions=True)\r\n subpages = set()\r\n\r\n for sublist in subpages_list:\r\n if isinstance(sublist, list):\r\n subpages.update(sublist)\r\n\r\n return subpages\r\n\r\n try:\r\n while urls_to_scrape:\r\n subpages = await process_urls(urls_to_scrape)\r\n 
urls_to_scrape = subpages.difference(processed_urls)\r\n processed_urls.update(urls_to_scrape)\r\n\r\n if not all_emails:\r\n logging.warning(\"No valid emails found.\")\r\n else:\r\n logging.info(f\"Found {len(all_emails)} valid emails.\")\r\n\r\n except Exception as e:\r\n logging.exception(\"Error occurred in the scrape_data function\")\r\n\r\n return all_emails, email_to_name\r\n\r\n\r\n\r\n\r\n\r\nimport pandas as pd\r\nfrom openpyxl.styles import Font\r\nfrom openpyxl import load_workbook, Workbook\r\ndef save_data(all_emails, email_to_name):\r\n unique_emails = set()\r\n filtered_emails = []\r\n\r\n for email, url in all_emails:\r\n if email not in unique_emails:\r\n unique_emails.add(email)\r\n filtered_emails.append((email, email_to_name.get(email, 'N/A'), url))\r\n\r\n valid_emails = [(email, name, url) for email, name, url in filtered_emails if is_valid_email(email)]\r\n\r\n if not valid_emails:\r\n logging.warning(\"No valid emails found.\")\r\n\r\n df = pd.DataFrame(valid_emails, columns=['Email', 'Name', 'URL'])\r\n\r\n try:\r\n # Load the workbook or create a new one\r\n try:\r\n workbook = load_workbook('scraped_data.xlsx')\r\n except FileNotFoundError:\r\n workbook = Workbook()\r\n\r\n # Select the active sheet or create a new one\r\n sheet_name = 'Sheet1'\r\n if sheet_name in workbook.sheetnames:\r\n sheet = workbook[sheet_name]\r\n else:\r\n sheet = workbook.create_sheet(sheet_name)\r\n\r\n # Determine the starting row for writing data\r\n start_row = sheet.max_row + 1 if sheet.max_row > 0 else 1\r\n\r\n # Write data to the sheet\r\n for _, row in df.iterrows():\r\n sheet.append(row.tolist())\r\n\r\n # Move the \"Name\" column before the \"URL\" column\r\n if len(valid_emails) > 0:\r\n sheet.move_range(f'C{start_row}:C{sheet.max_row}', rows=-1)\r\n\r\n # Apply formatting\r\n headers = ['Email', 'Name', 'URL']\r\n for col_num, header in enumerate(headers, start=1):\r\n cell = sheet.cell(row=start_row, column=col_num)\r\n cell.value = header\r\n cell.font = Font(bold=True)\r\n\r\n # Save the workbook\r\n workbook.save('scraped_data.xlsx')\r\n\r\n except Exception as e:\r\n logging.exception(\"Error occurred while saving data to Excel file:\\n{}\".format(str(e)))\r\n\r\n logging.info(\"Data saved to scraped_data.xlsx\")\r\n\r\n\r\n\r\ndef main():\r\n try:\r\n # Prompt user for modifying user agent behavior\r\n user_agent_behavior = input(\"Do you want to modify the user agent behavior? (yes/no): \")\r\n\r\n # Prompt user for the URL to scrape\r\n urls = input(\"Enter the URLs to scrape (separated by commas if multiple): \").split(',')\r\n\r\n # Prompt user for the maximum depth of scraping\r\n while True:\r\n try:\r\n max_depth = int(input(\"Enter the maximum depth of scraping (recommended: 1-5): \"))\r\n if max_depth > 0:\r\n break\r\n else:\r\n print(\"Please enter a positive integer value.\")\r\n except ValueError:\r\n print(\"Invalid input. Please enter a positive integer value.\")\r\n\r\n # Prompt user for the number of concurrent threads\r\n while True:\r\n try:\r\n max_threads = int(input(\"Enter the maximum number of concurrent threads (recommended: 5-10): \"))\r\n if max_threads > 0:\r\n break\r\n else:\r\n print(\"Please enter a positive integer value.\")\r\n except ValueError:\r\n print(\"Invalid input. 
Please enter a positive integer value.\")\r\n\r\n # Prompt user for additional language modules\r\n prompt_additional_language_modules()\r\n\r\n # Get the netlocs of the URLs\r\n target_netlocs = [urlparse(url).netloc for url in urls]\r\n\r\n # Create an event loop\r\n loop = asyncio.get_event_loop()\r\n\r\n # Scrape data\r\n all_emails, email_to_name = loop.run_until_complete(scrape_data(urls, name_threshold, user_agent_behavior, target_netlocs, max_threads))\r\n\r\n # Save data\r\n save_data(all_emails, email_to_name)\r\n\r\n except Exception as e:\r\n logging.exception(\"An error occurred during execution: {}\".format(str(e)))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"SleepingGod/SGEmailCrawlerV2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38030417866","text":"from argparse import ArgumentParser\n\nimport torch\nfrom torch import nn\nfrom torch.optim import AdamW\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader, random_split\nfrom tqdm import tqdm\n\nfrom gaze_model import annetV3\nfrom eye_dataset import eyeDataset\n\nfrom ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events\nfrom ignite.metrics import Loss\n\n\ndef get_data_loaders(train_batch_size, val_batch_size, data_split, dataset_dir):\n img_transform = transforms.Compose([\n transforms.ColorJitter(brightness=0.3, contrast=0.3),\n #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n \n dataset = eyeDataset(dataset_dir, img_transform, True, True)\n\n train_set, test_set = random_split(dataset, [int(len(dataset)*data_split), int(len(dataset)-(int(len(dataset)*data_split)))])\n\n train_loader = DataLoader(train_set, train_batch_size, shuffle=True, num_workers=2, pin_memory=True)\n val_loader = DataLoader(test_set, val_batch_size, shuffle=True, num_workers=2, pin_memory=True)\n return train_loader, val_loader\n\n\ndef run(train_batch_size, val_batch_size, data_split, dataset_dir, epochs, lr, weight_decay, log_interval, save_dir, use_last_checkpoint):\n train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size, data_split, dataset_dir)\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = annetV3(device=device, in_channels=2)\n \n if use_last_checkpoint == True:\n model.load_state_dict(torch.load('./checkpoints/checkpoint.pth', map_location=device))\n\n model.to(device) # Move model before creating optimizer\n optimizer = AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)\n criterion = nn.MSELoss()\n trainer = create_supervised_trainer(model, optimizer, criterion, device=device)\n\n val_metrics = {\"MSE\": Loss(criterion)}\n evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)\n\n pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=f\"ITERATION - loss: {0:.6f}\")\n\n @trainer.on(Events.EPOCH_STARTED)\n def log_epoch_start(engine):\n tqdm.write(f\"Epoch {engine.state.epoch} started\")\n\n @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))\n def log_training_loss(engine):\n pbar.desc = f\"ITERATION - loss: {engine.state.output:.6f}\"\n pbar.update(log_interval)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n tqdm.write(f\"Evaluating traning data on model...\")\n pbar.refresh()\n evaluator.run(train_loader)\n metrics = 
evaluator.state.metrics\n avg_MSE = metrics[\"MSE\"]\n tqdm.write(f\"Training Results - Epoch: {engine.state.epoch} Avg loss: {avg_MSE:.6f}\")\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def save_model(engine):\n avg_MSE = evaluator.state.metrics[\"MSE\"]\n model_name = \"checkpoint.pth\"\n tqdm.write(f\"{trainer.last_event_name.name}: saving model as: {model_name}\")\n torch.save(model.state_dict(), './checkpoints/' + model_name)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_time(engine):\n tqdm.write(f\"{trainer.last_event_name.name} took { trainer.state.times[trainer.last_event_name.name]} seconds\")\n\n pbar.n = pbar.last_print_n = 0\n\n @trainer.on(Events.COMPLETED)\n def log_validation_results(engine):\n tqdm.write(f\"Evaluating validation data on model...\")\n evaluator.run(val_loader)\n metrics = evaluator.state.metrics\n avg_MSE = metrics[\"MSE\"]\n tqdm.write(f\"Validation Results - Epoch: {engine.state.epoch} Avg loss: {avg_MSE:.6f}\")\n\n @trainer.on(Events.COMPLETED)\n def log_time(engine):\n tqdm.write(f\"{trainer.last_event_name.name} took { trainer.state.times[trainer.last_event_name.name]} seconds\")\n\n @trainer.on(Events.COMPLETED)\n def save_model(engine):\n avg_MSE = evaluator.state.metrics[\"MSE\"]\n model_name = f\"gazeModel_epoch_{engine.state.epoch}_loss_{avg_MSE:.6f}.pth\"\n tqdm.write(f\"{trainer.last_event_name.name}: saving model as: {model_name}\")\n torch.save(model.state_dict(), save_dir + model_name)\n\n trainer.run(train_loader, max_epochs=epochs)\n pbar.close()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--batch_size\", type=int, default=64, help=\"input batch size for training (default: 64)\")\n parser.add_argument(\"--val_batch_size\", type=int, default=128, help=\"input batch size for validation (default: 128)\")\n parser.add_argument(\"--data_split\", type=float, default=0.9, help=\"training/validation dataset split (default: 0.9)\")\n parser.add_argument(\"--dataset_dir\", type=str, default='./eye_dataset/', help=\"dataset directory\")\n parser.add_argument(\"--epochs\", type=int, default=100, help=\"number of epochs to train (default: 100)\")\n parser.add_argument(\"--lr\", type=float, default=1.407292394580727e-07, help=\"learning rate (default: 1.407292394580727e-07)\")\n parser.add_argument(\"--weight_decay\", type=float, default=0.3, help=\"Adam weight_decay (default: 0.3)\")\n parser.add_argument(\"--log_interval\", type=int, default=10, help=\"how many batches to wait before logging training status\")\n parser.add_argument(\"--save_dir\", type=str, default='./garage/', help=\"directory to save the model in (default: ./garage/)\")\n parser.add_argument(\"--use_last_checkpoint\", action=\"store_true\", help=\"whether to use last checkpoint (default: False)\")\n\n args = parser.parse_args()\n\n run(args.batch_size, args.val_batch_size, args.data_split, args.dataset_dir, args.epochs, args.lr,\n args.weight_decay, args.log_interval, args.save_dir, args.use_last_checkpoint)","repo_name":"TheGislum/P6-gaze-estimation","sub_path":"train_new.py","file_name":"train_new.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74549846344","text":"# Snowflake ID generator\nfrom snowflake import Snowflake\nfrom datetime import datetime\n\nsnowflake = Snowflake(0, 0, 0, datetime(2023, 1, 1))\n\n# Database-related imports\nfrom models import db, connect_db, Project, Round, Group, Role, GroupRoleRelation, RoleFileRelation\nfrom .roundServices import getList as 
RoundGetList\nfrom sqlalchemy import or_\n\n\ndef add(name, startTime, endTime, frontImg, remark, createBy):\n id = snowflake.generate_id()\n item = Project(\n id=id,\n name=name,\n startTime=startTime,\n endTime=endTime,\n frontImg=frontImg,\n remark=remark,\n createBy=createBy,\n )\n db.session.add(item)\n db.session.commit()\n return id\n\n\ndef getList():\n result = db.session.query(Project).all()\n result_dict = [project.to_dict() for project in result]\n return result_dict\n\n\ndef getInfo(id):\n result = db.session.query(Project).get(id)\n if result:\n projectInfo = result.to_dict()\n roundList = RoundGetList(id)\n projectInfo['roundList'] = roundList\n return projectInfo\n else:\n return ''\n\n\ndef delInfo(id):\n result = db.session.query(Project).get(id)\n db.session.delete(result)\n db.session.commit()\n return 'successful'\n","repo_name":"Zehight/vote-flask","sub_path":"Services/projectServices.py","file_name":"projectServices.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28629298269","text":"n, m = map(int, input().split())\ncount = 0\nparent = list(range(n + 1))\ntruth = list(map(int, input().split()))[1:]\nparty = list()\n\ndef find(node):\n if parent[node] != node:\n parent[node] = find(parent[node])\n return parent[node]\n\ndef union(a, b):\n r1, r2 = map(find, (a, b))\n if r1 != r2:\n parent[r2] = r1\n\nfor _ in range(m):\n party.append(list(map(int, input().split()))[1:])\n\nfor people in party:\n for i in range(1, len(people)):\n union(people[i - 1], people[i])\n\nroot = find(1)\nknow = [parent[t] for t in truth]\n\nfor i in range(m):\n possible = True\n for t in truth:\n if find(party[i][0]) == find(t):\n possible = False\n break\n if possible:\n count += 1\n\nprint(count)","repo_name":"mangbaam/CodingTest","sub_path":"백준/Gold/1043. 
거짓말/거짓말.py","file_name":"거짓말.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70421507464","text":"# coding: utf-8\nfrom datetime import datetime\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\ndef get_commit_hist(commits):\n commit_profile = {\n 'daily': [],\n 'weekly': []\n }\n\n for c in commits:\n time = datetime.strptime(c['commit']['author']['date'][:19], '%Y-%m-%dT%H:%M:%S')\n # each profile sums to 1; a separate feature marks weekends/weekdays\n commit_profile['daily'].append(time.hour)\n commit_profile['weekly'].append(time.isoweekday())\n\n return commit_profile['daily']\n\ndef load_data(rpath):\n cmap = ['RdPu', 'YlOrRd', 'YlGnBu', 'plasma_r', 'rocket_r', 'viridis_r', 'Wistia', 'summer_r']\n fig = plt.figure(figsize=(16, 9))\n count = 0\n with open(rpath, 'r') as rf:\n for line in rf.readlines():\n count += 1\n with sns.axes_style('darkgrid'):\n fig.add_subplot(3, 4, count)\n plt.title('Mode #' + str(count))\n user = json.loads(line.strip())\n commit_profile = get_commit_hist(user['commits_list'])\n #plot_heatmap(commit_profile)\n sns.distplot(commit_profile)\n #plt.xticks(fontsize='small')\n #plt.xticks(rotation='90', fontsize='small')\n #plt.yticks(rotation='0', fontsize='small')\n plt.xlim(0, 23)\n plt.ylim(0, 0.20)\n plt.xticks(np.arange(0, 24, 3))\n plt.yticks(np.arange(0, 0.21, 0.1))\n plt.xlabel('Hour of Day')\n plt.ylabel('Ratio of Commits')\n\n plt.tight_layout()\n sav_fig = plt.gcf() # 'get current figure'\n sav_fig.savefig('../fig/center.pdf', format='pdf', dpi=1000)\n plt.show()\n\ndef plot_center(medoid_users, fig_wpath):\n fig = plt.figure(figsize=(16, 9))\n count = 0\n for user in medoid_users:\n count += 1\n with sns.axes_style('darkgrid'):\n fig.add_subplot(3, 4, count)\n plt.title('Mode #' + str(count))\n commit_profile = []\n\n for c in user['commits_list']:\n time = datetime.strptime(c['commit']['author']['date'][:19], '%Y-%m-%dT%H:%M:%S')\n # each profile sums to 1; a separate feature marks weekends/weekdays\n commit_profile.append(time.hour)\n\n sns.distplot(commit_profile)\n #plt.xticks(fontsize='small')\n #plt.xticks(rotation='90', fontsize='small')\n #plt.yticks(rotation='0', fontsize='small')\n plt.xlim(0, 23)\n plt.ylim(0, 0.20)\n plt.xticks(np.arange(0, 24, 3))\n plt.yticks(np.arange(0, 0.21, 0.1))\n plt.xlabel('Hour of Day')\n plt.ylabel('Ratio of Commits')\n\n plt.tight_layout()\n sav_fig = plt.gcf() # 'get current figure'\n sav_fig.savefig(fig_wpath, format='pdf', dpi=1000)\n plt.show()\n\nif __name__ == '__main__':\n #load_data('weekend_center.json')\n load_data('../data/daily_center.json')\n","repo_name":"jiayunz/Working-Pattern","sub_path":"utils/plot_center.py","file_name":"plot_center.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"14354541494","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash\nimport re\n\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z0-9._-]+$')\n\nclass Email:\n def __init__(self, data):\n self.id = data['id']\n self.email = data['email_address']\n\n\n @classmethod\n def saveToDB(cls,data):\n query = 'INSERT INTO emails (email_address) VALUES(%(email_address)s);'\n return connectToMySQL('emailSchema').query_db(query, data)\n\n\n @staticmethod\n def validate_email(data):\n isValid = True\n if not EMAIL_REGEX.match(data['email_address']):\n 
flash(\"Invalid email address!\")\n isValid = False\n return isValid\n\n @classmethod\n def showEmailsInDB(cls):\n query = 'SELECT email_address from emails;'\n return connectToMySQL('emailSchema').query_db(query)","repo_name":"longnghts/users_cr","sub_path":"flask_app/models/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71602091145","text":"import pathlib\n\nfrom . import ACTION_PULL, ACTION_PUSH, ACTION_DELETE, ACTION_SET_CONF\n\nfrom .git_settings import GitClientSettings\nfrom .git_sync import GitClientSync\nfrom .sync_dir_registration import SyncDirRegistration\nfrom .unison_sync import UnisonClientSync\n\nimport syncmanagerclient.util.globalproperties as globalproperties\n\nfrom .api import ApiService\n\n\nclass SyncClient:\n\n def __init__(self, mode, action, sync_env=None, force=False, namespace=None):\n self.mode = mode\n self.action = action\n if sync_env:\n self.sync_env = sync_env\n else:\n self.sync_env = globalproperties.sync_env\n self.force = force\n self.namespace = namespace\n self.errors = []\n\n def get_instance(self):\n if self.mode == 'git':\n if self.action == ACTION_SET_CONF:\n return GitClientSettings()\n elif self.action in [ACTION_PUSH, ACTION_PULL, ACTION_DELETE]:\n return GitClientSync(self.action)\n else:\n raise Exception('Unknown command \\'' + self.action + '\\'.')\n elif self.mode == 'unison':\n # ACTION_PULL and ACTION_PUSH are the same in unison context\n return UnisonClientSync(self.action)\n else:\n print('Unknown client')\n return None\n\n def sync_with_remote_repo(self, config):\n client_instance = self.get_instance()\n if not client_instance:\n return\n client_instance.set_config(config, self.force)\n client_instance.apply()\n if client_instance.errors:\n self.errors.extend(client_instance.errors)\n\n def get_and_sync_repos(self):\n api_service = ApiService(self.mode, self.sync_env)\n remote_repos = api_service.list_repos_by_client_env(full=True)\n if self.namespace:\n print(f\"Only syncing repos in namespace {self.namespace}\")\n for remote_repo in remote_repos:\n if self.namespace:\n p_ns = pathlib.Path(self.namespace)\n p = pathlib.Path(remote_repo['git_repo']['server_path_rel'])\n p = pathlib.Path(*p.parts[1:])\n if not str(p).startswith(str(p_ns)):\n continue\n config = {\n 'source': remote_repo['local_path_rel'],\n 'remote_repo': remote_repo['remote_name'],\n 'url': SyncDirRegistration.get_remote_url(remote_repo['git_repo']['server_path_absolute'])\n }\n self.sync_with_remote_repo(config)\n if self.errors:\n print('')\n print('#####################################################################################')\n print('Following repositories could not be (completely) synced:')\n print('')\n for error in self.errors:\n print(f\"{error.local_repo_path}\")\n print(f\"Context: {error.context}\")\n print(\"Error message:\")\n print(error.error)\n print('-------------------------------------------------------------------------------------')\n print('')\n","repo_name":"Grid-LTS/syncmanager","sub_path":"syncmanagerclient/syncmanagerclient/clients/sync_client.py","file_name":"sync_client.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3062981356","text":"##IMPORTS\n#region\nfrom turtle import *\nfrom random import randint\nfrom math import ceil\nfrom time import sleep, time\nfrom os import 
system\n#endregion\n\n###ALL FUNCTION DEFINITIONS\n#region\n#Function that defines where the food will appear, and draws it\ndef food_gen():\n screen=Screen()\n screen.tracer(False)\n\n food.penup()\n\n x=(randint((-200/advancement),(200/advancement)))*advancement\n y=(randint((-120/advancement),(130/advancement)))*advancement\n canvas=getcanvas()\n ids=canvas.find_overlapping(x-2.5,-(y+2.5),x+2.5,-(y-2.5))\n while (len(ids))!=0: #making sure the food does not appear on the snake or on pill\n x=(randint((-200/advancement),(200/advancement)))*advancement\n y=(randint((-120/advancement),(130/advancement)))*advancement\n ids=canvas.find_overlapping(x-2.5,-(y+2.5),x+2.5,-(y-2.5))\n \n food.setpos(x,y)\n a=randint(0,100)\n if a%20==0:\n food.dot(5,\"cyan\")\n elif a%50==0:\n food.dot(\"purple\")\n else:\n food.dot(5,\"orange\")\n\n screen.tracer(True)\n return(x,y)\n#Function that readjusts the direction of the head turtle, allowing the snake to turn \ndef direction_change(dir):\n screen=Screen()\n screen.tracer(False)\n #print(dir)\n if dir in ['w','W','Up']:\n head.setheading(90)\n if dir in ['a','A','Left']:\n head.setheading(180)\n if dir in ['s','S','Down']:\n head.setheading(270)\n if dir in ['d','D','Right']:\n head.setheading(0)\n\n screen.tracer(True)\n#Function that returns the relevant color at the coordinate, if the color is green, black or orange\ndef getcolor(x,y):\n y=-y #correcting for the change in coordinate system that will occur\n canvas=getcanvas()\n ids=canvas.find_overlapping(x,y,x,y) #locates the colors within the rectangle\n color='nocolor'\n if ids and len(ids)>=2:\n if len(ids)==3:\n if canvas.itemcget(ids[2],\"fill\")=='green':\n color='green'\n else:\n color=canvas.itemcget(ids[0],\"fill\")\n #returns the color, when relevant\n return(color)\n#Function to update the point total on screen\ndef point_update(point):\n screen=Screen()\n screen.tracer(False)\n points.goto(-205,138)\n points.clear()\n points.write(str(point))\n screen.tracer(True)\n#Function to advance the snake\ndef advance(length,pause):\n screen=Screen()\n direction=head.heading()\n position=head.position()\n\n if len(tails)data3[user]:\n high_score=score\n data3[user]=high_score\n else:\n high_score=data3[user]\n datakeys=list(data3.keys())\n datavalues=list(data3.values())\n ind=datakeys.index(user)\n datakeys.append(datakeys.pop(ind))\n datavalues.append(datavalues.pop(ind))\n with open(filename, 'w') as file:\n for i in datakeys:\n file.writelines(i+':'+str(data3[i])+'\\n')\n file.closed\n global_high_score=max(data3.values())\n return(global_high_score,high_score) \n#Function to get gameplay mode and start the game\ndef modeandstart():\n global modeofgame\n global clickinstance\n modeofgame=0\n system('cls')\n screen=Screen()\n screen.tracer(0)\n draw.penup()\n draw.goto(-100,-50)\n draw_button('Normal')\n draw.goto(-20,-50)\n draw_button('Mines')\n draw.goto(60,-50)\n draw_button('Walls')\n screen.update()\n screen.onclick(mode_check,add=False)\n timer.penup()\n color_list=['red','gold','chartreuse']\n i=3\n while i>0:\n timer.goto(head.xcor(), head.ycor()+10)\n timer.color(color_list[3-i])\n t=0\n while t<10:\n timer.write(str(i)+\"...\",font=('Arial',int((t+4)/2),'normal'))\n t+=1\n update()\n sleep(0.1)\n timer.clear()\n i-=1\n screen.tracer(1)\n#Function to generate mines\ndef mine_gen():\n screen=Screen()\n screen.tracer(False)\n\n mine.penup()\n\n x=(randint((-200/advancement),(200/advancement)))*advancement\n y=(randint((-120/advancement),(130/advancement)))*advancement\n 
canvas=getcanvas()\n ids=canvas.find_overlapping(x-4,-(y+4),x+4,-(y-4))\n while (len(ids))!=0: #making sure the food does not appear on the snake or on pill\n x=(randint((-200/advancement),(200/advancement)))*advancement\n y=(randint((-120/advancement),(130/advancement)))*advancement\n ids=canvas.find_overlapping(x-4,-(y+4),x+4,-(y-4))\n mine.setpos(x,y)\n mine.dot(8,\"black\")\n for i in range(8):\n mine.pendown()\n mine.setheading(i*360/8)\n mine.forward(6)\n mine.setpos(x,y)\n update()\n screen.tracer(True)\n#Function to generate walls\ndef wall_gen():\n overlap=1\n screen=Screen()\n canvas=getcanvas()\n screen.tracer(0)\n wall.penup()\n set_direction=90*(randint(0,3))\n wall.setheading(set_direction)\n set_length=advancement*(randint(4,15))\n while overlap!=0:\n overlap=0\n x=(randint((-200/advancement),(200/advancement)))*advancement\n y=(randint((-120/advancement),(130/advancement)))*advancement\n wall.goto(x,y)\n for i in range(set_length):\n wall.forward(1)\n xcheck=wall.xcor()\n ycheck=wall.ycor()\n ids=canvas.find_overlapping(xcheck-1,-(ycheck+1),xcheck+1,-(ycheck-1))\n overlap+=len(ids)\n wall.goto(x,y)\n wall.pendown()\n wall.forward(set_length)\n wall.penup()\n screen.tracer(1)\n#Function to draw arrows\ndef draw_arrow(direct):\n screen=Screen()\n screen.tracer(0)\n draw.setheading(90*direct)\n draw.pendown()\n draw.forward(8)\n posit=draw.position()\n draw.right(150)\n draw.forward(4)\n draw.goto(posit)\n draw.setheading(90*direct)\n draw.left(150)\n draw.forward(4)\n draw.goto(posit)\n draw.penup()\n screen.tracer(1)\n#Function to replace the stamp made by timer\ndef timer_stamp():\n screen=Screen()\n screen.tracer(0)\n timer.fillcolor('white')\n timer.pencolor('white')\n timer.pendown()\n timer.begin_fill()\n for i in range(4):\n timer.forward(5)\n timer.right(90)\n timer.end_fill()\n timer.penup()\n#Function to assign a value\ndef assign(string):\n uis.append(string)\n#Function to draw a button with something written inside, at the center\ndef draw_button(string):\n draw.fillcolor('khaki')\n draw.begin_fill()\n draw.setheading(90)\n draw.forward(20)\n draw.right(90)\n draw.forward(40)\n draw.right(90)\n draw.forward(20)\n draw.right(90)\n draw.forward(40)\n draw.end_fill()\n draw.penup()\n draw.right(90)\n draw.forward(5)\n x=draw.xcor()\n draw.color('black')\n draw.write(string,move=True,font=('Arial',6,'normal'))\n x2=draw.xcor()-x\n draw.undo()\n draw.setheading(0)\n draw.forward((40-x2)/2)\n draw.write(string,font=('Arial',6,'normal'))\n#Function to check which mode\ndef mode_check(x,y):\n global clickinstance\n global modeofgame\n if clickinstance==0:\n if -100<=x<=-60 and -50<=y<=30:\n modeofgame=1\n clickinstance=1\n elif -20<=x<=20 and -50<=y<=30:\n modeofgame=2\n clickinstance=1\n elif 60<=x<=100 and -50<=y<=30:\n modeofgame=3\n clickinstance=1\n else:\n modeofgame=0\n#Function to check which user is playing\ndef check_user(letter):\n global username\n global attempt\n if letter=='n':\n username=screen.textinput(\"USERNAME\",\"Enter username:\")\n else:\n attempt+=1\n#Function to locate last user to play the game\ndef get_last_user(filename):\n try:\n with open(filename) as data:\n data2=data.read() #opening the file in read mode\n data.closed #closing the file\n data2=data2.split(\"\\n\")\n data2=data2[:-1]\n last_user=data2[-1].split(':')[0]\n except:\n with open(filename,'w') as file:\n file.writelines(\"Visitor:0\\n\")\n file.closed\n with open(filename) as data:\n data2=data.read()\n data.closed\n last_user=data2.split(':')[0]\n return(last_user) 
\n#endregion\n\n##NECESSARY VARIABLES\n#region\ntails=[] #list storing all the parts of the tail\ngame=0 #intitial value of game\ndirection='d' #initial value for direction\nuser_direction=0 #initial value for valid input direction\ninvalid={'w':'s','a':'d','s':'w','d':'a'} #dictionary of invalid turns\narrow_equivalencies={'Up':'w','Left':'a','Down':'s','Right':'d'}\ntotal=0 #number of points\nlength=10 #length of the snake\nvelocity=11 #velocity of head\nadvancement=5 #each advancement of the snake\nnum_special=0 #number of special pills on screen\nstart_pill_time=0 #value to store pill time\nscreen=Screen() #screen\nprev_time=0 #time at which the special pill will be produced\nmove_delay=20 #variable to allow for an increase in speed\nmine_time=0 #time at which mine will be produced\nuis=['d'] #initial value of user inputs\nwalls_time=0 #time at which wall will be produced\nfood_eaten=0 #amount of normal food eaten at the given moment\nfood_till=0 #amount of food eaten by the time the special pill appears\nclickinstance=0\nmodeofgame=0\nattempt=0\n#endregion\n\n##TURTLE SETUP\n#region\nscreen.setup(450,340,900,200) #initial setup\nhead=Turtle()\nfood=Turtle()\ndraw=Turtle()\npoints=Turtle() \nspecial=Turtle()\ntimer=Turtle()\nmine=Turtle()\nwall=Turtle()\n#endregion\n\n##INITIALIZING\n#region\ninitialization()\nmodeandstart()\nfood_coor=food_gen() #generating food\ngame='start'\n\nstart_time=int(time()) #time at beginning of game\nscreen.tracer(False)\nfor i in range(198):\n draw.undo()\nscreen.tracer(True)\n#endregion\n\n###############\n###GAME PLAY###\n###############\n#region\nwhile game!=\"end\":\n if head.xcor()>=204 or head.xcor()<-204 or head.ycor()>=134 or head.ycor()<=-124:\n game='end' #lose game if you leave board\n \n #USER INPUT\n #region\n prev=uis[0]\n uis=([uis[-1]])\n modded_uis=[]\n if prev in arrow_equivalencies.keys():\n prev=arrow_equivalencies[prev]\n prev=prev.lower()\n for letter in ['w','a','s','d','W','A','S','D','Up','Down','Right','Left']:\n screen.onkeypress(lambda n=letter: assign(n), letter)\n screen.listen()\n if uis[0] in arrow_equivalencies.keys():\n uis[0]=arrow_equivalencies[uis[0]]\n uis[0]=uis[0].lower()\n if uis[0]!=invalid[prev] and uis[0]!=prev:\n direction_change(uis[0])\n #endregion\n \n advance(length,move_delay) #move snake\n move_delay+=0.3\n color_under=getcolor(head.xcor(),head.ycor()) #check what color is underneath the snake\n screen.tracer(0)\n if (color_under=='orange'): #check for food, and increase length if food is consumed\n food.undo()\n length+=4\n food_coor=food_gen()\n total+=4\n point_update(total)\n food_eaten+=1\n elif(color_under=='cyan'):\n food.undo()\n food_coor=food_gen()\n total+=50\n point_update(total)\n food_eaten+=1\n move_delay+=10 \n elif(color_under=='purple'):\n food.undo()\n food_coor=food_gen()\n total+=70\n point_update(total)\n food_eaten+=1\n move_delay/=5\n length*=2\n screen.tracer(1)\n \n current_time=int(time()-start_time) #timer at this turn \n \n #SPECIAL PILL\n #region\n if current_time % 30 == 15 and num_special==0 and food_eaten>food_till+3: #create a special pill and its timer every 30 seconds\n start_pill_time=int(time()-start_time)\n food_till=food_eaten\n screen.tracer(False)\n special_pill()\n num_special+=1\n timer_setup()\n timer.goto(timer.xcor()+2,timer.ycor()-2)\n timer.isvisible()\n screen.tracer(True)\n if start_pill_time!=0:\n screen.tracer(0)\n timer.color('white')\n screen.tracer(1)\n if prev_time!=current_time-start_pill_time: #to assure it only updates once per second for 10 
seconds\n screen.tracer(False)\n timer.setheading(180)\n timer_stamp()\n timer.forward(5)\n screen.tracer(True) #erase a tenth of the timer\n prev_time=current_time-start_pill_time #update previous time for comparison\n if current_time-start_pill_time>=10: #after 10 seconds, clear the pill\n screen.tracer(False)\n start_pill_time=0\n special.clear()\n num_special=0\n timer.clear()\n screen.tracer(True)\n\n if color_under=='red': #points added for reaching pill, and clearing it out\n screen.tracer(False)\n special.clear()\n timer.clear()\n total+=10*(10-(current_time-start_pill_time))\n length+=(16-(current_time-start_pill_time))\n start_pill_time=0\n num_special=0\n point_update(total)\n screen.update()\n screen.tracer(True)\n #endregion\n if modeofgame==2 and current_time>mine_time:\n mine_time+=22-int(food_eaten/4)\n mine_gen()\n if modeofgame==3 and current_time>walls_time:\n walls_time+=45-int(food_eaten/3)\n wall_gen()\n\n if color_under=='green' or color_under=='black': #end game\n game='end'\n#endregion\n\n##END OF GAME\n#region\nclearscreen()\nfilename='normal_scores.txt'\nif modeofgame==2:\n filename='mine_scores.txt'\nelif modeofgame==3:\n filename='wall_scores.txt'\nusername=get_last_user(filename)\nletter=screen.textinput(\"USER\",\"Are you user {}?(y,n)\".format(username))\ncheck_user(letter)\nhighest_scores=high_score(filename,total,username)\nhighest_score=highest_scores[1]\nglobal_highest=highest_scores[0]\nendgame(total,highest_score,username, global_highest) #end of game animations\nscreen.exitonclick()\nsystem('cls')\n#endregion","repo_name":"ShubhamSKA/Games","sub_path":"Snake/Snake_v8.py","file_name":"Snake_v8.py","file_ext":"py","file_size_in_byte":21682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16460838392","text":"# Palette.py\n# Extends hitherdither.palette\nimport hitherdither\nfrom PIL import Image\nimport numpy as np\nfrom skimage.color import rgb2lab, deltaE_ciede2000\nfrom enum import Enum\n\n\ncolordelta = Enum('colordelta',['EUCLIDEAN','CCIR','LAB'])\n\nCCIR_LUMINOSITY = np.array([299.0, 587.0, 114.0])\n\nclass Palette(hitherdither.palette.Palette):\n \n colordelta = colordelta.EUCLIDEAN\n\n def color_compare(self,c1, c2):\n luma_diff = c1.dot(CCIR_LUMINOSITY) / (255.0 * 1000.0) - c2.dot(CCIR_LUMINOSITY) / (255.0 * 1000.0)\n diff_col = (c1 - c2) / 255.0\n return ((diff_col ** 2).dot(CCIR_LUMINOSITY / 1000.0) * 0.75) + (luma_diff ** 2)\n\n def DeltaE(self,c1,c2):\n Lab1 = rgb2lab(c1/255)\n Lab2 = rgb2lab(np.array([[c2/255]]))\n return deltaE_ciede2000(Lab2[0][0],Lab1, kL= 0.5,kC=0.75)\n\n def image_distance(self, image, order=2):\n ni = np.array(image, \"float\")\n distances = np.zeros((ni.shape[0], ni.shape[1], len(self)), \"float\")\n for i, colour in enumerate(self):\n if self.colordelta == colordelta.EUCLIDEAN:\n distances[:, :, i] = np.linalg.norm(ni - colour, ord=order, axis=2)\n elif self.colordelta == colordelta.CCIR:\n distances[:, :, i] = self.color_compare(ni,colour)\n else:\n distances[:, :, i] = self.DeltaE(ni,colour)\n return distances\n\n def image_closest_colour(self, image, order=2):\n return np.argmin(self.image_distance(image, order=order), axis=2)\n\n def create_PIL_png_from_rgb_array(self, img_array):\n \"\"\"Create a ``P`` PIL image from a RGB image with this palette.\n Avoids the PIL dithering in favour of our own.\n Reference: http://stackoverflow.com/a/29438149\n :param :class:`numpy.ndarray` img_array: A ``[M x N x 3]`` uint8\n array representing RGB colours.\n 
:return: A :class:`PIL.Image.Image` image of mode ``P`` with colours\n available in this palette.\n \"\"\"\n cc = self.image_closest_colour(img_array, order=2)\n pa_image = Image.new(\"P\", cc.shape[::-1])\n pa_image.putpalette(self.colours.flatten().tolist())\n im = Image.fromarray(np.array(cc, \"uint8\")).im.convert(\"P\", 0, pa_image.im)\n try:\n # Pillow >= 4\n return pa_image._new(im)\n except AttributeError:\n # Pillow < 4\n return pa_image._makeself(im)","repo_name":"retrocomputacion/retrobbs","sub_path":"common/imgcvt/palette.py","file_name":"palette.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"81"} +{"seq_id":"8644465127","text":"import sys\nimport os\nimport pandas as pd\nimport functions as fn\nfrom AppConf import AppConf, PIECHART, BARCHART, WORDCLOUD, HEATMAP\n\n#############################################\ndef prepare_for_exit(msg, normal=False):\n '''\n Dislpay messages before terminating execution.\n\n Parameters:\n msg: (str) - Custom message to display\n normal: (bool) - True if the termination is expected, false otherwise.\n '''\n print(msg)\n if not normal:\n print('Aborting!')\n else:\n print('Done.')\n\n###############################################################\ndef main():\n '''\n The main control!\n '''\n # Initialise the configuration object for this instance\n appConfiguration = AppConf()\n\n print('')\n if appConfiguration.showHelp():\n prepare_for_exit('', normal=True)\n return\n\n exeName = os.path.split(sys.argv[0])\n print(f'Running {exeName[1]}...')\n\n # Get some things out of the way\n if len(sys.argv) == 0:\n prepare_for_exit('ERROR: MS Office Excel file needed as argument...')\n return \n\n # Get the xls file \n appConfiguration.setXLSfile()\n if not os.path.exists(appConfiguration.getXLSfile()):\n prepare_for_exit(f'ERROR: File does not exist :: {appConfiguration.getXLSfile()}')\n return\n\n # Check if we have interactive mode enabled\n appConfiguration.setInteractiveSession()\n\n # Check if output location is provided\n appConfiguration.setOutputPath()\n\n # Get columns from user\n ok = appConfiguration.setWorkingColumns()\n if not ok:\n prepare_for_exit('ERROR: Variables list file does not exist or no column IDs provided...')\n return \n\n # # Get figure kind from command line/user\n ok = appConfiguration.setFigureKind()\n if not ok:\n prepare_for_exit('ERROR: No valid input for figure type is provided...')\n return \n\n # Check if graphs should be displayed before saving\n appConfiguration.setViewFigure()\n \n # Get the name of the output file (if given)\n appConfiguration.setOutputFileName()\n\n # Check if there is a need to write to text file as well\n appConfiguration.setSupportFileName()\n\n # For wordclouds, get the exclusion list (if present)\n appConfiguration.setStopwords()\n\n # Get the contents of the file\n print(f'Reading {appConfiguration.getXLSfile()}...', end='')\n xlsContents = pd.read_excel(appConfiguration.getXLSfile())\n dfColumns = xlsContents.columns\n print(' OK')\n \n # Check if any user interaction is needed\n appConfiguration.interactWithUser(dfColumns)\n\n # Make sure that we got something to work with \n if len(appConfiguration.getWorkingColumns()) == 0:\n prepare_for_exit('ERROR: No valid input is provided...')\n return\n\n # Show a briefing before doing the work\n print(appConfiguration)\n\n # Ensure that each graph gets the correct input\n ok = appConfiguration.sanityCheck()\n if not ok:\n prepare_for_exit('')\n return\n 
\n # Create figures\n print('Saving figure...', end='')\n figKind = appConfiguration.getFigureKind()\n viewFigure = appConfiguration.viewFigure()\n figFilename = appConfiguration.getOutputFileName()\n stopwords_custom = appConfiguration.getStopwords()\n txtFile = appConfiguration.getSupportFileName()\n varList = appConfiguration.getWorkingColumns()\n key = list(varList.keys())[0]\n\n ok = True\n if figKind == PIECHART:\n # Do the pie chart\n fn.createPieChart(dfColumns[key], xlsContents, figFilename, display=viewFigure)\n elif figKind == WORDCLOUD:\n # Do the wordcloud\n fn.createWordCloud(dfColumns[key], xlsContents, figFilename, max_words=200, ignore=stopwords_custom, display=viewFigure) \n if len(txtFile) > 0:\n fn.dumpToText(txtFile, dfColumns[key], xlsContents)\n elif figKind == BARCHART:\n # Do the bar chart\n fn.createBarChart(dfColumns[key], xlsContents, figFilename, txtFile, display=viewFigure)\n elif figKind == HEATMAP:\n # Do the heatmap\n colList = [dfColumns[i] for i in varList]\n fn.createHeatmap(colList, xlsContents, figFilename, txtFile, display=viewFigure)\n else: \n print('\\n ** Not yet implemented! **')\n print('WARNING: No figure is produced!')\n ok = False\n \n if ok:\n print('OK')\n\n #TODO Produce any statistics?\n print('Goodbye.\\n')\n\n###############################################################\n# The main program\nif __name__ == '__main__':\n main()\n ","repo_name":"aliakatas/FancyFigures","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25397177104","text":"from typing import Union\nfrom fastapi import APIRouter, UploadFile, File\nfrom fastapi.responses import JSONResponse\nfrom pydantic import BaseModel\nfrom core.src.model.employee import get_salary, get_position, \\\n get_employee_data, write_employee_data, upload_file\nfrom service_provider.api.output_template.employee_output import EmployeeDataResponseAPI\n\n# Initialise router\nemployee_router = APIRouter()\n\n\n@employee_router.post(\"/salary_calculator/\")\nasync def salary_calculator(years_of_experience: int):\n salary = get_salary(years_of_experience)\n position = get_position(years_of_experience)\n return JSONResponse(\n status_code=200,\n content={\n 'years_of_experience': years_of_experience,\n 'position': position,\n 'salary': salary,\n }\n )\n\n\n@employee_router.post(\"/retrieve_employee_data/\")\nasync def retrieve_employee_data():\n employee_data = get_employee_data()\n return EmployeeDataResponseAPI(\n all_employee_data = \\\n EmployeeDataResponseAPI.format_to_response(employee_data)\n )\n\n\n@employee_router.post(\"/add_employee_data/\")\nasync def add_employee_data(\n name: str,\n role: str,\n years_of_experience: int\n):\n write_employee_data(name, role, years_of_experience)\n return JSONResponse(\n status_code=200,\n content={\n 'message': 'updated employee data in database',\n }\n )\n\n\n@employee_router.post(\"/upload_csv_file/\")\nasync def upload_csv_file(filename: str, file: UploadFile = File(...)):\n file_path = upload_file(filename, file)\n return JSONResponse(\n status_code=200,\n content={\n 'message': 'uploaded successfully',\n 'file_path': file_path,\n }\n )\n\n","repo_name":"jingtingchong/Python-Base-Project","sub_path":"service_provider/api/entrypoint/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"10174945920","text":"import socket\r\nimport time\r\nimport sys\r\nimport csv\r\nimport time\r\nimport boto3\r\nfrom picamera import PiCamera\r\n\r\ndirectory = '/home/pi/Desktop/aws' # folder name on your raspberry pi\r\n\r\nP = PiCamera()\r\nP.resolution = (800, 600)\r\n\r\n#collectionId = 'mycollection' # collection name\r\n\r\nwith open('credentials.csv', 'r') as input:\r\n next(input)\r\n reader = csv.reader(input)\r\n for line in reader:\r\n access_key_id = line[2]\r\n secret_access_key = line[3]\r\n\r\nclient = boto3.client('rekognition',\r\n region_name = 'us-east-2',\r\n aws_access_key_id = access_key_id,\r\n aws_secret_access_key = secret_access_key,\r\n )\r\n\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\ns.bind((\"\",2021))\r\ns.listen(1)\r\n\r\nwhile True:\r\n cl, addr = s.accept()\r\n print('client connected from', addr)\r\n request = str(cl.recv(1024))\r\n print('content = %s' % request)\r\n\r\n if 'msg' in request:\r\n msg = request.split('/?msg=')[1].split('HTTP')[0]\r\n msg = msg.replace('%20', ' ')\r\n resp_msg = msg\r\n if 'connect' in msg:\r\n P.start_preview()\r\n time.sleep(1)\r\n image = '{}/image_a.jpg'.format(directory)\r\n P.capture(image) # capture an image\r\n print('captured ' + image)\r\n P.stop_preview()\r\n with open(image, 'rb') as image:\r\n try:\r\n response = client.detect_labels(Image = {'Bytes':image.read()},\r\n MaxLabels=8,\r\n MinConfidence=50)\r\n result = \"\"\r\n for i in range(0,6): \r\n result = result+response.get('Labels')[i].get('Name')+\"/\"\r\n print(result)\r\n result = result.encode('utf-8')\r\n resp_msg = result\r\n except:\r\n resp_msg = \"no food detected\"\r\n elif 'check' in msg:\r\n resp_msg = \"start check\"\r\n \r\n suc_response = \"HTTP/1.1 200 OK\\r\\n\\r\\n%s\" % resp_msg\r\n cl.send(str.encode(suc_response))\r\n cl.close()\r\n #cl.send(suc_response.encode('ascii'))\r\n\r\n else:\r\n fail_response = \"HTTP/1.1 501 Implemented\\r\\n\\r\\nPlease attach msg!\"\r\n cl.send(str.encode(fail_response))\r\n cl.close()\r\n\r\n#cl.close()","repo_name":"muxueman/helloworld","sub_path":"aws&api/socket_detect.py","file_name":"socket_detect.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"19014638484","text":"import time\n\n# Check if any stories available for the user.\ndef any_stories(driver):\n div_before = driver.find_element_by_xpath(\"/html/body/div[1]/section/main/div/header/div/div\")\n ariadisabled = (div_before.get_attribute(\"aria-disabled\"))\n tabindex = (div_before.get_attribute(\"tabindex\"))\n div_before.click()\n if tabindex == -1 and ariadisabled:\n return False\n return True\n\n\n# Check for the url link redirect available\ndef story_check(driver, url):\n time.sleep(1)\n # print(\"YAHA AAYA THA\")\n story_img = driver.find_element_by_xpath(\n \"/html/body/div[1]/section/div[1]/div/section/div/div[1]/div/div/div/div/div[2]/div[2]/div\")\n story_img.click()\n see_post = driver.find_element_by_xpath(\n \"/html/body/div[1]/section/div[1]/div/section/div/div[1]/div/div/div/div/div[2]/div[3]/div[1]/div[1]\")\n see_post.click()\n was_current = driver.current_url\n driver.back()\n return was_current == url\n\n\n# open the stories and navigate among them\ndef open_stories(driver, url, instaHandle):\n time.sleep(2)\n base = \"https://www.instagram.com/\" + instaHandle + \"/\"\n while driver.current_url != base:\n if story_check(url):\n return True\n # print(\"RUKO NEXT STORY PE JAA RAHE HAI\")\n time.sleep(1)\n # next_story = driver.find_element_by_xpath(\"/html/body/div[1]/section/div[1]/div/section/div/button/div\")\n # next_story.click()\n return False\n","repo_name":"kunatastic/insta-fier","sub_path":"story.py","file_name":"story.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"72210991944","text":"import ibus\nimport engine\nimport sys, os, os.path\nimport tutcode\nimport skkdict\n\nfrom gettext import dgettext\n_ = lambda a : dgettext(\"ibus-tutcode\", a)\nN_ = lambda a : a\n\nsys.path.insert(0, os.path.join(os.getenv('IBUS_TUTCODE_PKGDATADIR'), 'setup'))\nimport config\n\nclass EngineFactory(ibus.EngineFactoryBase):\n def __init__(self, bus):\n self.__bus = bus\n super(EngineFactory, self).__init__(self.__bus)\n\n self.__id = 0\n bus_config = self.__bus.get_config()\n bus_config.connect(\"reloaded\", self.__config_reloaded_cb)\n bus_config.connect(\"value-changed\", self.__config_value_changed_cb)\n self.__config_reloaded_cb(bus_config)\n\n def create_engine(self, engine_name):\n if engine_name == \"tutcode\":\n self.__id += 1\n return engine.Engine(self.__bus, \"%s/%d\" % (\"/org/freedesktop/IBus/TUTCode/Engine\", self.__id))\n\n return super(EngineFactory, self).create_engine(engine_name)\n\n def __load_sysdict(self, _config):\n try:\n use_mmap = _config.get_value('use_mmap')\n instances = list()\n for path in _config.sysdict_paths:\n instances.append(skkdict.SysDict(path, use_mmap=use_mmap))\n return skkdict.MultiSysDict(instances)\n except:\n return skkdict.EmptyDict()\n\n def __config_reloaded_cb(self, bus_config):\n engine.Engine.config = config.Config(self.__bus)\n engine.Engine.sysdict = self.__load_sysdict(engine.Engine.config)\n\n def __config_value_changed_cb(self, bus_config, section, name, value):\n if section == 'engine/tutcode':\n engine.Engine.config.set_value(name, value)\n if name in ('sysdict_paths', 'use_mmap'):\n engine.Engine.sysdict = self.__load_sysdict(engine.Engine.config)\n","repo_name":"deton/ibus-tutcode","sub_path":"engine/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"}
+{"seq_id":"16120292467","text":"from db_models.modelsv2 import ProjectAnalysis, ProjectControlLog, ReportForms\nfrom db.db import session\nfrom flask_restful import Resource, fields, marshal_with, abort\nfrom modules.json_serializator import engine_encode, engine_decode\nfrom modules.log_helper_module import add_log\nfrom resv2.project_analysis_resources import OUTPUT_FIELDS as parent_fields\n\n# PARAMS\nENTITY_NAME = \"Project Analysis By Project\"\nMODEL = ProjectAnalysis\nROUTE = \"/v2/projectSelectAnalysis/\"\nEND_POINT = \"v2-project-select-analysis\"\n\nclass LogItems(fields.Raw):\n def format(self, value):\n if value is None or value == '':\n return {'success': 0, 'warning': 0, 'error': 0, 'info': 0}\n json_ob = engine_decode(value)\n result = {\n 'success': len([x for x in json_ob if x['state_id'] == 1]),\n 'warning': len([x for x in json_ob if x['state_id'] == 2]),\n 'error': len([x for x in json_ob if x['state_id'] == 3]),\n 'info': len([x for x in json_ob if x['state_id'] == 4]),\n 'engine_operations': len([x for x in json_ob if x['state_id'] == 5])\n }\n return result\n\n\nOUTPUT_FIELDS = dict(parent_fields)\nOUTPUT_FIELDS['log'] = LogItems(attribute='pc_data')\n\nclass ProjectAnalysisRemover(Resource):\n def __init__(self):\n self.route = \"/v2/projectCleanData/\"\n self.end_point = \"v2-project-clean-data\"\n pass\n\n def delete(self, id):\n try:\n session.query(MODEL).filter(MODEL.project_id == id).delete(synchronize_session=False)\n session.commit()\n\n session.query(ReportForms).filter(ReportForms.project_id == id).delete(synchronize_session=False)\n session.commit()\n\n # if not analysis:\n # abort(404, message=\"Document {} doesn't exist\".format(id))\n # session.delete(analysis)\n # session.commit()\n return {}, 204\n except Exception as e:\n add_log(\"Error while removing {0} with id: {1}\".format(ENTITY_NAME, id))\n abort(400, message=\"Error while removing {0} with id: {1}\".format(ENTITY_NAME, id))\n\n\nclass ProjectSelectAnalysisResource(Resource):\n def __init__(self):\n self.route = ROUTE\n self.end_point = END_POINT\n pass\n\n @marshal_with(OUTPUT_FIELDS)\n def get(self, id):\n try:\n analysis = session.query(MODEL) \\\n .join(ProjectControlLog, ProjectControlLog.project_id == MODEL.project_id) \\\n .add_columns(MODEL.id, MODEL.project_id, MODEL.data,\n ProjectControlLog.id.label('pc_id'), ProjectControlLog.data.label('pc_data')) \\\n .filter(MODEL.project_id == id).first()\n if not analysis:\n abort(404, message=\"Reports not found\")\n return {\n 'id': analysis.id,\n 'data': analysis.data,\n 'project_id': analysis.project_id,\n 'pc_data': analysis.pc_data\n }\n except Exception as e:\n abort(400, message=\"Error while adding record Document\")\n","repo_name":"vyadzmak/Landau.X.Api","sub_path":"cross_res/project_analysis_by_project_resources.py","file_name":"project_analysis_by_project_resources.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"34472465162","text":"import pygame as pg\nfrom settings.settings import *\nfrom settings.background import *\n\n# endloop = [clock, screen, Sounds, Fonts, MenuButtons, Predator]\n\n\ndef endloop(endloopList):\n\n    # GameLoop running?\n    running = True\n\n    # Endless sound loop\n    endloopList[2].ende_sound.play(-1)\n\n    while running:\n\n        # Delta Time\n        dt = endloopList[0].tick(FPS)\n\n        # Events\n        for event in pg.event.get():\n            if event.type == pg.QUIT:\n                endloopList[2].ende_sound.stop()\n                running = False\n            elif event.type == pg.MOUSEMOTION:\n                # If the mouse is moved, set the center of the rect\n                # to the mouse pos. You can also use pg.mouse.get_pos()\n                # if you're not in the event loop.\n                endloopList[5].cursor_rect.center = event.pos\n\n            # Ends the game on ESC\n            elif event.type == pg.KEYDOWN:\n                if event.key == pg.K_ESCAPE:\n                    endloopList[2].ende_sound.stop()\n                    running = False\n\n            # Change states when selecting a rect\n            elif event.type == pg.MOUSEBUTTONDOWN:\n                if endloopList[4].objectsRectEnd[0].collidepoint(event.pos):\n                    endloopList[2].button.play()\n                    endloopList[2].ende_sound.stop()\n                    running = False\n                    return True\n\n                elif endloopList[4].objectsRectEnd[1].collidepoint(event.pos):\n                    endloopList[2].button.play()\n                    endloopList[2].ende_sound.stop()\n                    running = False\n\n        # Render\n        endloopList[1].fill((WHITE))\n        endloopList[1].blit(endGameBG.image, endGameBG.rect)\n\n        # Render text and rects for menu\n        endloopList[4].drawRectEnd(endloopList[1], 2, WHITE, WIDTH *\n                                   0.5 - 100, 100, 200, 50, 5)\n        endloopList[4].drawText(endloopList[1], endloopList[3].font_text,\n                                LOCATIONEND, TEXTEND, 2, BLACK)\n\n        # Blit the image at the rect's topleft coords.\n        endloopList[1].blit(endloopList[5].CURSOR_IMG,\n                            endloopList[5].cursor_rect)\n\n        # Double Buffering\n        pg.display.flip()\n","repo_name":"king-anduin/moorhuhn-extreme","sub_path":"loops/endloop.py","file_name":"endloop.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"27924087782","text":"# Average 3\r\n\r\nstudent_scores = input().split(' ')\r\n\r\nn1, n2, n3, n4 = student_scores\r\n\r\nn1 = round(float(n1),1)\r\nn2 = round(float(n2),1)\r\nn3 = round(float(n3),1)\r\nn4 = round(float(n4),1)\r\n\r\ntry:\r\n exam_score = float(input())\r\nexcept EOFError:\r\n pass\r\n\r\navg = float(((n1*2.0)+(n2*3.0)+(n3*4.0)+(n4*1.0))/10)\r\n\r\nprint(\"Media: %.1f\"%(avg))\r\nif avg >= 7.0:\r\n print(\"Aluno aprovado.\")\r\nelif avg < 5.0:\r\n print(\"Aluno reprovado.\")\r\nelse:\r\n print(\"Aluno em exame.\")\r\n print(\"Nota do exame: %.1f\"%(exam_score))\r\n avg_final = (avg + exam_score)/2\r\n if avg_final >= 5.0:\r\n print(\"Aluno aprovado.\")\r\n else:\r\n print(\"Aluno reprovado.\")\r\n print(\"Media final: %.1f\" % (avg_final))","repo_name":"rendersonjunior/UriOnlineJudge-Python","sub_path":"1040_Average_3.py","file_name":"1040_Average_3.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"8193973434","text":"from os import environ\n\n# if you set a property in SESSION_CONFIG_DEFAULTS, it will be inherited by all configs\n# in SESSION_CONFIGS, except those that explicitly override it.\n# the session config can be accessed from methods in your apps as self.session.config,\n# e.g. self.session.config['participation_fee']\n\nSESSION_CONFIG_DEFAULTS = {\n 'real_world_currency_per_point': 4.00,\n 'participation_fee': 0.00,\n 'doc': \"\",\n}\n\nSESSION_CONFIGS = [\n\n {\n 'name': 'pgg_punishment',\n 'display_name': \"Public Good Game - Punishment\",\n 'num_demo_participants': 3,\n 'app_sequence': ['pggfg'],\n 'punishment_round': 11,\n 'use_browser_bots': False,\n 'gender': False,\n },\n {\n 'name': 'pgg_punishment_gender',\n 'display_name': \"Public Good Game - Punishment Only + Gender\",\n 'num_demo_participants': 3,\n 'app_sequence': ['pggfg'],\n 'punishment_round': 1,\n 'use_browser_bots': False,\n 'gender': True,\n },\n\n]\nUSE_L10N = False\nDECIMAL_SEPARATOR = '.'\nfor i in SESSION_CONFIGS:\n i.setdefault('use_browser_bots', False)\n\n# ISO-639 code\n# for example: de, fr, ja, ko, zh-hans\nLANGUAGE_CODE = 'ru'\n\n# e.g. EUR, GBP, CNY, JPY\nREAL_WORLD_CURRENCY_CODE = 'RUB'\nUSE_POINTS = True\nPOINTS_CUSTOM_NAME = 'ECU'\nROOMS = [{'name': 'hse',\n 'display_name': 'Summer School (HSE)'}]\n\nADMIN_USERNAME = 'admin'\n# for security, best to set admin password in an environment variable\nADMIN_PASSWORD = environ.get('OTREE_ADMIN_PASSWORD')\n\nDEMO_PAGE_INTRO_HTML = \"\"\" \"\"\"\n\nSECRET_KEY = '+qbflu%yq+u0$br#xte7$klu*k55byl*yw7_$mhk^a!msth_1t'\n\n# if an app is included in SESSION_CONFIGS, you don't need to list it here\nINSTALLED_APPS = ['otree', 'pggfg']\n","repo_name":"chapkovski/hse-pggfg","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"40326322196","text":"\nfile1 = open('puzzle13.txt', 'r')\nlines = file1.readlines()\n\npackets = []\nfor i in range(0, len(lines), 3):\n p1 = eval(lines[i].strip())\n p2 = eval(lines[i + 1].strip())\n\n packets.append(p1)\n packets.append(p2)\n\n\n#bubble sort\nfor b in range(len(packets)):\n for n in range(0, len(packets) - b - 1):\n p1 = packets[n]\n p2 = packets[n + 1]\n stack = []\n stack.append((p1, p2))\n\n while len(stack) > 0:\n cur = stack.pop()\n\n if len(cur[0]) == 0 and len(cur[1]) > 0:\n stack.clear()\n break\n\n for j in range(len(cur[0])):\n if j > (len(cur[1]) - 1):\n packets[n], packets[n+1] = packets[n+1], packets[n]\n stack.clear()\n break\n \n if type(cur[0][j]) is list and type(cur[1][j]) is not list:\n stack.append((cur[0][j + 1:], cur[1][j + 1:]))\n stack.append((cur[0][j], [cur[1][j]]))\n break\n\n if type(cur[0][j]) is not list and type(cur[1][j]) is list:\n stack.append((cur[0][j + 1:], cur[1][j + 1:]))\n stack.append(([cur[0][j]], cur[1][j]))\n break\n\n if type(cur[0][j]) is list and type(cur[1][j]) is list:\n stack.append((cur[0][j + 1:], cur[1][j + 1:]))\n stack.append((cur[0][j], cur[1][j]))\n break\n\n if cur[0][j] == cur[1][j]:\n if j == (len(cur[0]) - 1) and len(cur[1]) > len(cur[0]):\n stack.clear()\n break\n else:\n continue\n\n if cur[0][j] > cur[1][j]:\n packets[n], packets[n+1] = packets[n+1], packets[n]\n stack.clear()\n break\n else:\n stack.clear()\n break\n\n\nfirst = 0\nsecond = 0\nfor p in range(len(packets)):\n print(packets[p])\n if len(packets[p]) == 1 and type(packets[p][0]) is list and len(packets[p][0]) == 1 and packets[p][0][0] == 2:\n first = p + 1\n\n if len(packets[p]) == 1 and type(packets[p][0]) is list and len(packets[p][0]) == 1 and packets[p][0][0] == 6:\n second = p + 1\n\nprint(first * second)\n","repo_name":"lopes22/aoc2022","sub_path":"puzzle13_2.py","file_name":"puzzle13_2.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"6860338771","text":"\"\"\"\nDraw the module's canonical icemap.\n\"\"\"\n\nimport iceplotlib.plot as iplt\n\n# load data\nnc = iplt.load('pism_plot_sample.nc')\n\n# plot\nnc.icemap(velsurf_cmap='CMRmap_r', usurf_cmap=None, usurf_colors='k')\n\n# show\nnc.close()\niplt.show()\n","repo_name":"juseg/iceplotlib","sub_path":"examples/pism_icemap_demo.py","file_name":"pism_icemap_demo.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"12186390911","text":"def suma(num1, *otros): #con * se pone que en caso de que se pongan otros elementos despues de los asignados se guarden ahí\n\tprint(num1)\n\tprint(otros) #se guarda como tupla\nsuma(2,4,5,6,7,9)\n\n \ntipo = type(1)\ntipo2=type(\"Hola\")\nprint(tipo)\nprint(tipo2)\nprint(tipo==tipo2)\n\nclass Humano:\n def __init__ (self,nombre,edad,ID):\n self.nombre= nombre\n self.edad= edad\n self.ID= ID\n\n def validador(self):\n tipo= type(1)\n if(type(self.ID)==tipo):\n print(\"Tu ID es valido\")\n else:\n print(\"Tu ID no es valido, revisale intenta de nuevo\")\n\n def mayoriaEdad(self):\n if (self.edad>18):\n print(\"Tienes mayoria de edad\")\n else:\n print(\"Un no tienes mayoria de edad\") \n\npersona=Humano(\"Adrian\", 35 ,98765)\npersona.validador()\npersona.mayoriaEdad()","repo_name":"AdrianFRamirez/AD_PILARES","sub_path":"ejercicios/ensayos.py","file_name":"ensayos.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"20939543216","text":"from eve import Eve\nfrom eve_sqlalchemy import SQL\nfrom eve_sqlalchemy.validation import ValidatorSQL\nfrom eve_sqlalchemy.decorators import registerSchema\nfrom flask_security import UserMixin, RoleMixin, \\\n Security, SQLAlchemyUserDatastore\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import Column, String, Integer, ForeignKey, \\\n DateTime, Table, Boolean\nfrom sqlalchemy import func\nfrom flask import request\nimport json\n\nBase = declarative_base()\n\nrole_user = Table('role_user', Base.metadata,\n Column('role_id', Integer, ForeignKey('role.id')),\n Column('user_id', Integer, ForeignKey('user.id')))\n\n\nclass Common(Base):\n __abstract__ = True\n _created = Column(DateTime, default=func.now())\n _updated = Column(DateTime, default=func.now(), onupdate=func.now())\n _etag = Column(String(40))\n\n\nclass Role(Common, RoleMixin):\n __tablename__ = 'role'\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String(127), unique=True)\n description = Column(String(255))\n users = relationship(\"User\", secondary=role_user, back_populates=\"roles\")\n\n\nclass User(Common, UserMixin):\n __tablename__ = 'user'\n id = Column(Integer, primary_key=True, autoincrement=True)\n username = Column(String(255), unique=True)\n email = Column(String(255), unique=True)\n password = Column(String(255))\n active = Column(Boolean())\n confirmed_at = Column(DateTime)\n roles = relationship(Role, secondary=role_user, back_populates=\"users\")\n\nregisterSchema('role')(Role)\nregisterSchema('user')(User)\n\nsettings = {\n 'ID_FIELD': 'id',\n 'ITEM_LOOKUP_FIELD': 'id',\n 'DOMAIN': {\n 'role': Role._eve_schema['role'],\n 'user': User._eve_schema['user']\n },\n 'ITEM_METHODS': ['GET', 'DELETE', 'PUT', 'PATCH'],\n 'RESOURCE_METHODS': ['GET', 'POST', 'DELETE'],\n 'DEBUG': True,\n 'SQLALCHEMY_DATABASE_URI': 'postgres://postgres:root@localhost:5432/rest',\n 'IF_MATCH': False,\n 'ENFORCE_IF_MATCH': False,\n 'EMBEDDING': True\n}\nsettings['DOMAIN']['role']['additional_lookup'] = {\n 'url': 'regex(\"[\\w]+\")',\n 'field': 'name'\n}\n\nsusers = settings['DOMAIN']['user']\n\nsusers['item_lookup_field'] = 'id'\nsettings['DOMAIN']['role']['item_lookup_field'] = 'id'\n\nsusers['embedded_fields'] = ['roles']\nsusers['schema']['roles']['data_relation']['embeddable'] = True\n\n\napp = Eve(validator=ValidatorSQL, data=SQL, settings=settings)\ndb = app.data.driver\n\n\n@app.route('/user//roles', methods=['POST'])\ndef add_role(user_id):\n user = db.session.query(User).get(user_id)\n # TODO: assumes that id of role is sent as well - safer verison could help\n role = db.session.query(Role).get(request.json['id'])\n user.roles.append(role)\n db.session.add(user)\n db.session.commit()\n return json.dumps({'success': True}), 200, \\\n {'ContentType': 'application/json'}\n\n\n@app.route('/user//roles', methods=['DELETE'])\ndef delete_role(user_id):\n user = db.session.query(User).get(user_id)\n # TODO: assumes that id of role is sent as well - safer verison could help\n role = db.session.query(Role).get(request.json['id'])\n user.roles.remove(role)\n db.session.add(user)\n db.session.commit()\n return json.dumps({'success': True}), 200, \\\n {'ContentType': 'application/json'}\n\n\nBase.metadata.bind = db.engine\ndb.Model = Base\nuser_datastore = SQLAlchemyUserDatastore(db, User, Role)\nsecurity = Security(app, user_datastore)\n\n\ndb.drop_all()\ndb.create_all()\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"vlasy/rest-datastore-server","sub_path":"rest-datastore-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"27109881270","text":"import os\r\nfrom colorama import init, Fore\r\nimport json\r\nfrom fake_useragent import UserAgent\r\nimport asyncio\r\nimport datetime\r\nfrom data.database import database\r\nimport aiohttp\r\nfrom aiohttp import FormData\r\n\r\n\r\ninit(autoreset=True)\r\n\r\n\r\nasync def addlogs(text: str):\r\n with open('logs.txt', 'a', encoding='utf-8') as file:\r\n current_datetime = datetime.datetime.now()\r\n formatted_datetime = current_datetime.strftime('%Y-%m-%d %H:%M:%S')\r\n file.write(f'[{formatted_datetime}] {text}\\n')\r\n file.close()\r\n\r\nasync def send_message(channel_id, token, text, image_ids=None):\r\n \"\"\"Отправляет запрос на отправку сообщения\"\"\"\r\n ua = UserAgent()\r\n user_agent = ua.random\r\n payload = {\"content\": str(text)} # Загружаем в dat'у наш текст\r\n headers = {\"authorization\": token, 'User-Agent': user_agent} # хедерс, передаем токен и юзер агент - чтобы не засчитали как спам\r\n\r\n data = FormData()\r\n data.add_field(\"payload_json\", json.dumps(payload))\r\n\r\n \"\"\"Проверка добавления картинок\"\"\"\r\n if image_ids is not None and isinstance(image_ids, list):\r\n for i, image_id in enumerate(image_ids):\r\n try:\r\n image_path = f'data/images/{image_id}'\r\n if os.path.exists(f\"{image_path}\"):\r\n data.add_field(f\"file{i + 1}\", open(f\"{image_path}\", \"rb\"))\r\n except Exception as e:\r\n pass\r\n\r\n \"\"\"Отправка сообщения\"\"\"\r\n async with aiohttp.ClientSession() as session:\r\n url = f\"https://discord.com/api/v9/channels/{channel_id}/messages\"\r\n async with session.post(url, headers=headers, data=data) as response:\r\n status_code = response.status\r\n await addlogs(f'Сообщение отправлено с кодом {status_code} / Канал отправки {channel_id} / Файлы {image_ids}')\r\n return status_code # Возвращаем наш статус\r\n\r\nasync def checkout_request(request):\r\n token = request[0]\r\n channelid = request[1]\r\n text = request[2]\r\n images = request[3]\r\n cooldown = request[4]\r\n constCooldown = request[5]\r\n id = request[6]\r\n if None in request:\r\n if images is None: # Проверяем, что пустая строка это не картинки\r\n ...\r\n else:\r\n \"\"\"Значит что реквест еще не заполнился\"\"\"\r\n \"\"\"Заканчиваем функцию и возвращаем 0\"\"\"\r\n print(Fore.RED + f'Не законченная строка')\r\n return 0\r\n\r\n if cooldown != 0:\r\n \"\"\"Уменьшаем кд на 1 секунду\"\"\"\r\n new_cooldown = cooldown - 1\r\n await database.updateTime(id, new_cooldown) # задаем новое время\r\n return 0 # возвращаем 0, чтобы функция не продолжала сове действие\r\n if images is not None:\r\n images = images.split(', ') # превращаем наши перечисления в массив\r\n if images == []:\r\n images = None\r\n\r\n \"\"\"Если все проверки прошли, то запускаем нашу отправку\"\"\"\r\n await database.updateTime(id, constCooldown) # Берем constCooldown как изначальное время\r\n status_code = await send_message(channelid, token, text, images)\r\n print(Fore.LIGHTCYAN_EX + f'Сообщение №{id} отправлено с кодом {status_code}')\r\n\r\n # Обновляем время до изначального\r\n\r\n\r\nasync def send_messages_and_update():\r\n \"\"\"\r\n Каждую секунду создает новые потоки со всеми реквестами в базе.\r\n :return:\r\n \"\"\"\r\n while True:\r\n await asyncio.sleep(1)\r\n all_requests = await database.getAllRequests()\r\n if all_requests is not None: # обработать событие для проверки реквеста\r\n \"\"\"Делаем так, чтобы реквесты не мешали друг другу и каждый реквест выполнялся в своем потоке.\"\"\"\r\n tasks = [checkout_request(request) for request in all_requests]\r\n all_tasks = tasks\r\n asyncio.ensure_future(asyncio.gather(*all_tasks)) # запуск\r\n\r\n print(Fore.LIGHTMAGENTA_EX + f'Круг закончился')\r\n else:\r\n print(Fore.RED + f'Нет реквестов в базе!!')\r\n\r\n\r\n\r\nasync def main():\r\n loop = asyncio.get_event_loop()\r\n tasks = [loop.create_task(send_messages_and_update())]\r\n\r\n # Запуск асинхронных задач\r\n await asyncio.gather(*tasks)\r\n\r\nif __name__ == \"__main__\":\r\n print(Fore.LIGHTBLUE_EX + f'Старт скрипта')\r\n asyncio.run(main())\r\n","repo_name":"vefixx/autoposter_py","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4773078996","text":"# Import required libraries\nimport datetime\nimport json\nimport xmltodict\nimport urllib.request\nimport shutil\n\n# Store dates\ntodayDate = datetime.date.today()\ntomorrowDate = todayDate + datetime.timedelta(days=1)\n\n# Define FuelWatch RSS feed variables\nrssBase = \"http://www.fuelwatch.wa.gov.au/fuelwatch/fuelWatchRSS\"\n\nProduct = {\n \"Unleaded Petrol\": 1,\n \"Premium Unleaded\": 2,\n \"Diesel\": 4,\n \"LPG\": 5,\n \"98 RON\": 6,\n \"E85\": 10,\n \"Brand diesel\": 11\n}\n\nStateRegion = {\n \"Gascoyne\": 1,\n \"Goldfields-Esperance\": 2,\n \"Great Southern\": 3,\n \"Kimberley\": 4,\n \"Mid-West\": 5,\n \"Peel\": 6,\n \"Pilbara\": 7,\n \"South-West\": 8,\n \"Wheatbelt\": 9,\n \"Metro\": 98\n}\n\nDay = {\n \"today\": \"today\",\n \"tomorrow\": \"tomorrow\", # Only available after 2:30PM\n \"yesterday\": \"yesterday\"\n}\n\n# Loop through all Products and State Regions and get tomorrow's prices\nfor x in Product:\n for y in StateRegion:\n rssURL = rssBase + \"?Product=\" + str(Product[x]) + \"&StateRegion=\" + str(StateRegion[y]) + \"&Day=\" + Day[\"tomorrow\"]\n\n # Open FuelWatch RSS feed and save as XML file\n pathXML = \"files/xml/\" + str(tomorrowDate) + \"/fuelWatchRSS_\" + x + \"_\" + y + \".xml\"\n with urllib.request.urlopen(rssURL) as response, open(pathXML, \"wb\") as saveXML:\n shutil.copyfileobj(response, saveXML)\n\n # Open FuelWatch XML file and save as JSON file\n pathJSON = \"files/json/\" + str(tomorrowDate) + \"/fuelWatchRSS_\" + x + \"_\" + y + \".json\"\n with open(pathXML) as inXML:\n inJSON = json.dumps(xmltodict.parse(inXML.read()), indent=4)\n saveJSON = open(pathJSON, \"w\")\n saveJSON.write(inJSON)\n saveJSON.close()","repo_name":"zegor/fuelwatchwa-to-json","sub_path":"fuelwatchwa-to-json.py","file_name":"fuelwatchwa-to-json.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"8193973434","text":"import json\nimport sys\nimport operator\nfrom pprint import pprint\nfrom collections import defaultdict\nresultString=\"\"\n\ndef openJSONFile():\n print(\"Loading the JSON File\")\n data=json.load(open('hmmmodel.txt'))\n with open(\"hmmoutput.txt\",'w',encoding='utf-8') as file:\n \t\tfile.truncate()\n return data\n\n\ndef writeToFile(resultString):\n with open(\"hmmoutput.txt\",'a',encoding='utf-8') as file:\n file.write(resultString)\n\n\ndef findBackPointers(dictionary_keywords,words):\n\n resultString=\"\"\n\n for i,word in enumerate(reversed(words)):\n if(i==0):\n maxValue=-1\n for uniqueTag in dictionary_keywords:\n for currentTag in dictionary_keywords[uniqueTag]:\n tuple = dictionary_keywords[uniqueTag][currentTag]\n\n currentValue=tuple[0]\n previousTag=tuple[1]\n currentWord=tuple[2]\n if(uniqueTag==len(words)-i):\n #print(\"current val\" +str(currentValue))\n if(currentValue>=maxValue):\n maxValue=currentValue\n tagForWord=currentTag\n backPointer=previousTag\n #print(\"assigning tag for word\" + word +\"and tag is\"+ tagForWord)\n #exit()\n\n #print(\"backpointer is\" + backPointer)\n resultString+=word+\"/\"+tagForWord+\" \"\n else:\n for uniqueTag in dictionary_keywords:\n # print(\"unique tag\"+str(uniqueTag))\n for currentTag in dictionary_keywords[uniqueTag]:\n #print(\"curent tag\" +currentTag)\n tuple=dictionary_keywords[uniqueTag][currentTag]\n #print(tuple)\n previousTag=tuple[1]\n currentWord=tuple[2]\n if(currentTag==backPointer and uniqueTag==len(words)-i):\n prevTag=previousTag\n #print(\"prev now\" + prevTag)\n resultString+=word+\"/\"+backPointer+\" \"\n backPointer=prevTag\n finalString=resultString.split()\n finalString.reverse()\n #print(finalString)\n finalString=\" \".join(finalString)+\"\\n\"\n writeToFile(finalString)\n\ndef parseTestData(data):\n f = open(sys.argv[1], 'r', encoding=\"utf-8\")\n max_start_value=0\n max_value=0\n chosen_tag=\"\"\n tag_list=[]\n prev_state_values=[]\n starting_tag_list=[]\n joint_dict=dict()\n max_of_lists=[[]]\n max_val_prev=0\n lastIndexTagList=[] #to calculate stop probabilites.\n lastIndexValueList=[]\n\n prev_tag_list=[]\n for sentence in f:\n dictionary_keywords=defaultdict(dict)\n tag_list=[]\n words=sentence.split()\n i=0\n max_start_value = 0\n starting_tag_list=[]\n prev_val_list=[]\n prev_state_values=[]\n lastIndexTagList=[]\n lastIndexValueList=[]\n lastWordFlag=0\n uniqueCounter=0\n j=0\n for j,each_word in enumerate(words):\n uniqueCounter+=1\n #print(\"for the word\" + each_word)\n max_value=0\n if(j!=0):\n prev_word=words[j-1]\n if(each_word in data[\"emission\"]):\n tag_values= data[\"emission\"][each_word] #get the corresponding tag values.\n max_value=0\n maxtag=\"\"\n index=0\n else: #change after smoothing.\n #print(\"Encountered new word!\")\n data[\"emission\"][each_word]={}\n #print(data[\"uniquetags\"])\n for alltags in data[\"uniquetags\"]:\n data[\"emission\"][each_word][alltags]=1\n tag_values=data[\"emission\"][each_word]\n #print(\"tag values for the word\" + each_word +\"is\" + str(tag_values))\n for tag in tag_values:\n #print(\"current chosen tag is\" + tag)\n joint_dict={}\n if(i==0):\n if(tag in data[\"transition\"][\"start\"] and tag in data[\"emission\"][each_word]):\n start_val=data[\"transition\"][\"start\"][tag] * data[\"emission\"][each_word][tag]\n starting_tag_list.append(tag) #refresh for every word.\n dictionary_keywords[uniqueCounter][tag]=(start_val,\"start\",each_word)\n prev_state_values.append(start_val)\n prev_tag_list=starting_tag_list\n prev_val_list=prev_state_values\n #print(\"dictionary keywords...\" +str(dictionary_keywords))\n continue\n \n else:\n start_val= 0\n starting_tag_list.append(tag) #refresh for every word.\n prev_state_values.append(start_val)\n prev_tag_list = starting_tag_list\n prev_val_list = prev_state_values\n dictionary_keywords[uniqueCounter][tag]=(0,\"start\",each_word)\n continue\n\n else:\n for every_start_tag,every_prev_value in zip(starting_tag_list,prev_state_values):\n if(tag in data[\"transition\"][every_start_tag] and tag in data[\"emission\"][each_word]):\n #print(data[\"transition\"][every_start_tag][tag])\n #print(data[\"emission\"][each_word][tag])\n prob_val=float(data[\"transition\"][every_start_tag][tag]* data[\"emission\"][each_word][tag]*every_prev_value)\n #print(\"prob val!!!\" + str(prob_val))\n if(i==len(words)-1):\n #print(\"last word!\")\n prob_val*=data[\"transition\"][\"stop\"][tag]\n #print(data[\"transition\"][\"stop\"][tag])\n #print(\"prob val now\" + str(prob_val))\n\n elif(tag not in data[\"transition\"][every_start_tag] or tag not in data[\"transition\"][each_word]):\n prob_val=0\n\n if(max_value<=prob_val):\n max_value=prob_val\n maxtag=every_start_tag\n joint_dict.update({maxtag:max_value})\n maxx_value=max_value\n max_value=0\n every_start_tag=max(joint_dict.items(), key = lambda x: x[1])[0]\n dictionary_keywords[uniqueCounter][tag]=(joint_dict[every_start_tag],every_start_tag,each_word)\n #print(\"dict keywords..\" + str(dictionary_keywords))\n\n k=0\n prev_tag_list.append(tag)\n prev_val_list.append(max(joint_dict.values()))\n max_val_prev=0\n max_val_prev=0\n\n if (i == len(words) - 1):\n lastIndexTagList.append(tag)\n lastWordFlag=1\n lastIndexValueList.append(prob_val)\n\n max_last_value=0\n\n starting_tag_list=prev_tag_list\n prev_state_values=prev_val_list\n\n max_start=0\n\n #handling sentences with a single word.\n if(len(words)==1):\n for first_tag,first_value in zip(starting_tag_list,prev_state_values):\n if(first_value>=max_start):\n max_start=first_value\n max_first_tag=first_tag\n\n prev_tag_list=[] #refresh\n prev_val_list=[] #refresh\n i+=1\n if (lastWordFlag == 1): #take care of the stop states\n #print(\"Last state\")\n max_last_value=0\n for last_tag, last_value in zip(lastIndexTagList, lastIndexValueList):\n #print(\"last value\" + str(last_value))\n if (last_value >= max_last_value):\n max_last_value = last_value\n max_last_tag = last_tag\n #print(max_last_tag)\n\n break\n findBackPointers(dictionary_keywords,words)\n\n\n \nif __name__ == '__main__':\n data = openJSONFile()\n parseTestData(data)\n print(\"successfully done\")","repo_name":"gayathriravic/Hidden-Markov-Model-Part-of-Speech-Tagger","sub_path":"hmmdecode3.py","file_name":"hmmdecode3.py","file_ext":"py","file_size_in_byte":8357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"2640715915","text":"from connect import *\nfrom tkinter import *\nimport tkinter.messagebox\nimport tkinter.ttk as ttk\nimport sys\nimport copy\nimport datetime\n\n################################################################\n# Version 1 : Création\n# Version 1.01 : Corrections de bugs mineurs et ajout d'une fonctionnalité de fichier log (à compléter --> Version 1.1)\n# Version 1.02 : Corrections de l'appel au PACS (fonction connection_pacs) suite au passage en RS9B (MaJ 1.1 : plus\n# nécessaire)\n# Version 1.1 : Changement des couleurs des ROIs et changement de noms des protocoles suite au nouveau scanner\n# Version 1.2 : Modification du script : le CT n'est plus récupéré du PACS, seul le RTStruct est importé\n# Version 1.2.1 : Modification par CM : changement du Type de la ROI 'Scar' pour passer de 'Undefined' à\n# 'IrradiatedVolume'. On lui met comme organ type 'Target'. (l. 394-395)\n# Version 1.2.2 : Modification par CM : oubli de \"Scar_L\" et \"Scar_R\" qui n'étaient pas en OrganType \"Target\"\n# (seulement \"Scar\"), ajout l.360-361.\n# Ajout de la ROI \"External - PTV TOT\" car nécessaire pour tous les template de clinical goals\n# (3D, rIMRT et VMAT)\n# Version 1.2.3 : Modification par CM : mis à jour du script pour la 10B (modification dans l'architecture du statetree\n# ui l.504_507)\n# Version 1.2.4 : Modification par CM : mettre RemoveHoles3D à False dans la fonction SimplifyContours pour éviter que\n# cela supprime certaines coupes d'OARs qui sont inclues dans des organes.\n# Version 1.3 : Modification du script de manière à ce qu'il marche pour les replanifications et reformat code pour\n# aller plus vite\n################################################################\n\n###########################################\n##### SCRIPT SENO - ETAPE 1 : MEDECIN #####\n###########################################\n\nDEBUG = False\nLOG_FILE = False\n\n#########################################\n##### VARIABLES GLOBALES RAYSTATION #####\n#########################################\n\npatient_db = get_current(\"PatientDB\")\nui = get_current(\"ui\")\n\n##############################\n##### VARIABLES GLOBALES #####\n##############################\n\npath_RS = r\"\\\\PMS-RAY2-DB01\\RS_Dicom\"\nDate_et_Heure = datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\nVersionScript = \"1.2\"\nfichier_dictionnaire_annotate = r\"\\\\PMS-RAY2-DB01\\RS_Scripts\\struc_seno.csv\"\n\nif LOG_FILE:\n saveout = sys.stdout\n print(\"saveout : \", saveout)\n filename = r\"\\\\PMS-RAY2-DB01\\RS_Scripts\\logs\\logs_\" + datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\") + \".txt\"\n print(\"filename log : \", filename)\n fsock = open(filename, 'w')\n sys.stdout = fsock\n\n\n#############################\n##### FONCTIONS DE CASE #####\n#############################\n\ndef find_exam(case):\n \"\"\"Fonction renvoyant :\n :None si pas de CT dans le Case\n :le nom du CT si un seul CT\n :la liste des noms des CTs si plusieurs CTs\"\"\"\n if case.Examinations.Count == 0:\n return None\n elif case.Examinations.Count == 1:\n return [case.Examinations[0].Name]\n else:\n liste_exam = [examination.Name for examination in case.Examinations]\n return liste_exam\n\n####################################\n##### FONCTIONS D IMPORTATIONS #####\n####################################\n\ndef import_struct(caseName, total_results_series, path_series):\n patient = get_current(\"Patient\")\n for results, path in zip(total_results_series, path_series):\n print(path)\n patient.ImportDataFromPath(Path=path, CaseName=caseName, SeriesOrInstances=results)\n all_files = [os.path.join(path, o) for o in os.listdir(path) if os.path.isfile(os.path.join(path, o))]\n print(\"all_files = \", all_files)\n\n if not DEBUG:\n for f in all_files:\n os.remove(f)\n os.rmdir(path)\n return\n\ndef return_rtstruct(path, patient_id, study_to_import):\n \"\"\"Fonction renvoyant les RTstruct et les chemins associé si plusieurs séries à importer \"\"\"\n # Récupération du patient ID (key) et study Instance ID (value) dans list_study_to_import\n # UNE SEULE study_to_import POSSIBLE DONC PAS BESOIN DE FAIRE UNE LISTE\n # TOUTE LA FONCTION EST A MODIFIER CAR ELLE PREND EN COMPTE LE FAIT QU'ON PEUT RECUPERER PLUSIEURS SERIES\n list_study_to_import = []\n list_study_to_import.append({'PatientID': patient_id, 'StudyInstanceUID': study_to_import})\n\n # Récupère les chemins de tous les dossiers contenue dans le serveur \\\\PMS-RAY2-DB01\\RS_Dicom\n all_path = [os.path.join(path, o) for o in os.listdir(path) if\n (os.path.isdir(os.path.join(path, o)) and \"ARTPLAN\" in o)]\n total_results_series = []\n path_series = []\n for study_to_import in list_study_to_import:\n for path in all_path:\n # Récupère le RTStruct du chemin concerné par ce tour de boucle seulement si le SearchCriterias correspond\n # à study_to_import\n series_to_import = [s for s in patient_db.QuerySeriesFromPath(Path=path, SearchCriterias=study_to_import) if\n s['Modality'] == \"RTSTRUCT\"]\n if (series_to_import):\n path_series.append(path)\n results_series = []\n # Si le searchCriteria correspond, récupère l'ID du patient, le study instance uid et la série instance\n # uid dans results_series\n for result in series_to_import:\n results_series.append({'PatientID': patient_id, 'StudyInstanceUID': result['StudyInstanceUID'],\n 'SeriesInstanceUID': result['SeriesInstanceUID']})\n total_results_series.append(results_series)\n\n if total_results_series:\n # renvoit tous les RTstruct et les chemins (si plusieurs MAIS NE DEVRAIT PAS ARRIVER)\n return [total_results_series, path_series]\n else:\n return [False, False]\n\n\nclass main_window:\n\n def annulation(self):\n \"\"\"Si l'utilisateur clique sur \"Annuler\" dans la pop up -> quitte le script\"\"\"\n if LOG_FILE:\n sys.stdout = saveout\n fsock.close()\n sys.exit()\n\n def verif_radiobutton(self):\n \"\"\"Fonction vérifiant que toutes les informations nécessaires au script ont été renseigénes dans la pop up:\n :technique de traitement\n :paroi/sein dans le cas unilat\n :paroi/sein pour les 2 côté dans le cas bilat\n Si toutes les informations sont renseignées -> lance la fonction create_struct()\"\"\"\n if self.listvariable_exam.get() == \"\":\n tkinter.messagebox.showerror(\"Erreur\", \"Choisir CT à contourer\")\n return False\n if self.var3DVMAT.get() == \" \":\n tkinter.messagebox.showerror(\"Erreur\", \"Choisir la technique de traitement\")\n return False\n if self.varBilat.get() == \"unilat\":\n if self.varSein_unilat.get() == \" \":\n tkinter.messagebox.showerror(\"Erreur\", \"Sélectionner paroi ou sein et la latéralité\")\n return False\n else:\n self.create_struct()\n return True\n\n elif self.varBilat.get() == \"bilat\":\n if self.varSeinG_bilat.get() == \" \" or self.varSeinD_bilat.get() == \" \":\n tkinter.messagebox.showerror(\"Erreur\", \"Sélectionner paroi ou sein pour les 2 côtés\")\n return False\n else:\n self.create_struct()\n return True\n\n else:\n tkinter.messagebox.showerror(\"Erreur\", \"Choisir si traitement unilatéral ou bilatéral\")\n return False\n\n def create_struct(self):\n \"\"\"Fonction qui :\n :récupère un dictionnaire associant noms français et anglais des ROIs en fonction des choix faits par\n l'utilisateur dans la pop up\n :crée un dictionnaire associant noms anglais des ROIs et leur caractéristiques\n :récupère le 1er CT\n :\"\"\"\n\n # Peut être mettre la fonction en dehors de la class main_window pour plus de clarté ?\n def charge_dict_roi(self):\n \"\"\"Fonction renvoyant un dictionnaire associant les noms français des ROIs (Annotate) et les noms anglais\n des ROIs (RS) ajustés en fonction des informations rentrées dans la pop up (+ courbe CT A ENLEVER)\"\"\"\n\n # Ouverture du fichier .csv contenant la correspondance entre noms des ROIS français Annotate et anglais RS\n file = open(fichier_dictionnaire_annotate, \"r\")\n lines = file.readlines()\n file.close()\n\n dict_roi = {} # dictionnaire de 
correspondance entre nom des ROIs anglais et français\n\n for line in lines:\n if line.split(';')[0].strip() == \"###\":\n pass\n\n # Lorsque le script lit la ligne commençant par \"###SCAN\" :\n # Récupération de la courbe du scanner GOSIM dans le fichier excel, A ENLEVER\n elif line.split(';')[0].strip() == \"###SCAN\":\n str_courbe_CT2ED = line.split(';')[1].strip()\n if DEBUG:\n Patient = get_current(\"Patient\")\n Patient.Save()\n if (str(Patient.ModificationInfo.SoftwareVersion) == '7.99.3.2'):\n str_courbe_CT2ED = \"SiemensIGR_ssTi\"\n\n # Lorsque le script lit la ligne qui commence par \"####EOF\" = dernière ligne du fichier .csv :\n # Ajouts des noms des CTV français et de leur correspondant anglais au dictionnaire en fonction des\n # choix fait par l'utilisateur dans la pop up\n # SCRIPT FAIT CETTE ACTION EN DERNIER, A METTRE A LA FIN DE LA FONCTION charge_dict_roi()\n elif line.split(';')[0].strip() == \"###EOF\":\n if self.varBilat.get() == \"bilat\":\n if self.varSeinD_bilat.get() == \"paroiD_bilat\":\n dict_roi['Sein D'] = \"CTVp_thoracicw_R_skin\"\n if self.varSeinG_bilat.get() == \"paroiG_bilat\":\n dict_roi['Sein G'] = \"CTVp_thoracicw_L_skin\"\n if self.varBilat.get() == \"unilat\" and (self.varSein_unilat.get() == \"paroiD_unilat\"):\n dict_roi['Sein D'] = \"CTVp_thoracicw_skin\"\n if self.varBilat.get() == \"unilat\" and (self.varSein_unilat.get() == \"paroiG_unilat\"):\n dict_roi['Sein G'] = \"CTVp_thoracicw_skin\"\n\n return dict_roi, str_courbe_CT2ED\n\n else:\n # Toutes les structures du fichier .csv dont la ligne est lu mais qui ne seront pas utilisées du fait\n # des choix de l'utilisateur dans la pop up auront une value \"None\" (voir .csv)\n # Pour la ligne lu par le script :\n # Si l'utilisateur a choisi bilat, ajout des noms des ROIs français d'Annotate en keys (1e colonne\n # du .csv) et des noms des ROIs anglais RS en value (2e colonne pour bilat)\n if self.varBilat.get() == \"bilat\":\n dict_roi[line.split(';')[0].strip()] = line.split(';')[1].strip()\n\n # Si l'utilisateur a choisi unilat et côté droit, ajout des noms des ROIs français d'Annotate en\n # keys (1e colonne du .csv) et des noms des ROIs anglais RS en value (3e colonne pour unilat droit)\n if self.varBilat.get() == \"unilat\" and (\n self.varSein_unilat.get() == \"seinD_unilat\" or self.varSein_unilat.get() == \"paroiD_unilat\"):\n dict_roi[line.split(';')[0].strip()] = line.split(';')[2].strip()\n\n # Si l'utilisateur a choisi unilat et côté gauche, ajout des noms des ROIs français d'Annotate en\n # keys (1e colonne du .csv) et des noms des ROIs anglais RS en value (4e colonne pour unilat gauche)\n if self.varBilat.get() == \"unilat\" and (\n self.varSein_unilat.get() == \"seinG_unilat\" or self.varSein_unilat.get() == \"paroiG_unilat\"):\n dict_roi[line.split(';')[0].strip()] = line.split(';')[3].strip()\n\n self.top.withdraw()\n\n dict_roi, str_courbe_CT2ED = charge_dict_roi(self)\n\n # Création d'un dictionnaire associant les noms anglais de ROI (keys) aux caractéristiques de la ROI (type,\n # couleur, booléen) rentrées en values\n\n ROI_list = {}\n\n ROI_list[\"Heart\"] = [\"Organ\", \"255,84,255\", True]\n ROI_list[\"Spinal_cord\"] = [\"Organ\", \"255,128,64\", True]\n ROI_list[\"External\"] = [\"External\", \"0,128,0\", True]\n ROI_list[\"Larynx\"] = [\"Organ\", \"255,128,64\", True]\n ROI_list[\"Liver\"] = [\"Organ\", \"128,64,64\", True]\n ROI_list[\"Stomach\"] = [\"Organ\", \"128,128,0\", True]\n ROI_list[\"Thyroid\"] = [\"Organ\", \"128,128,255\", True]\n ROI_list[\"Esophagus\"] = 
[\"Organ\", \"64,0,0\", True]\n\n if self.varBilat.get() == \"unilat\":\n ROI_list[\"Lung_ipsilat\"] = [\"Organ\", \"0,170,0\", True]\n ROI_list[\"Lung_contra\"] = [\"Organ\", \"0,84,255\", True]\n ROI_list[\"Breast_contra\"] = [\"Organ\", \"255,0,255\", True]\n ROI_list[\"HumeralHead\"] = [\"Organ\", \"0,255,128\", True]\n\n # Il faudrait vérifier ici que les GG ne sont pas des cibles avant de les créer (évite d'avoir à les supp après)\n ROI_list[\"CTVn_IMN\"] = [\"CTV\", \"0,255,0\", False]\n ROI_list[\"CTVn_interpec\"] = [\"CTV\", \"255,165,0\", False]\n ROI_list[\"CTVn_L1\"] = [\"CTV\", \"255,128,64\", False]\n ROI_list[\"CTVn_L2\"] = [\"CTV\", \"170,0,126\", False]\n ROI_list[\"CTVn_L3\"] = [\"CTV\", \"243,199,118\", False]\n ROI_list[\"CTVn_L4\"] = [\"CTV\", \"83,66,34\", False]\n\n # Intégration du plexus brachial au dictionnaire si un/des GG a/ont été coché(s) dans la pop up\n if self.int_gg_IMN.get() or self.int_gg_L1.get() or self.int_gg_L2.get() or self.int_gg_L3.get() \\\n or self.int_gg_L4.get() or self.int_gg_interpec.get():\n ROI_list[\"BrachialPlexus\"] = [\"Organ\", \"128,255,0\", True]\n\n if (self.varSein_unilat.get() == \"seinG_unilat\" or self.varSein_unilat.get() == \"seinD_unilat\"):\n ROI_list[\"CTVp_breast_skin\"] = [\"CTV\", \"128,64,64\", True]\n ROI_list[\"CTVp_tumourbed\"] = [\"CTV\", \"139,69,19\", False]\n\n if (self.varSein_unilat.get() == \"paroiG_unilat\" or self.varSein_unilat.get() == \"paroiD_unilat\"):\n ROI_list[\"CTVp_thoracicw_skin\"] = [\"CTV\", \"128,64,0\", True]\n ROI_list[\"Scar\"] = [\"IrradiatedVolume\", \"255,0,0\", True]\n\n if self.varBilat.get() == \"bilat\":\n ROI_list[\"Lung_R\"] = [\"Organ\", \"0,170,0\", True]\n ROI_list[\"Lung_L\"] = [\"Organ\", \"0,84,255\", True]\n ROI_list[\"HumeralHead_R\"] = [\"Organ\", \"0,255,255\", True]\n ROI_list[\"HumeralHead_L\"] = [\"Organ\", \"0,255,128\", True]\n\n ROI_list[\"CTVn_IMN_L\"] = [\"CTV\", \"0,255,0\", False]\n ROI_list[\"CTVn_interpec_L\"] = [\"CTV\", \"0,0,255\", False]\n ROI_list[\"CTVn_L1_L\"] = [\"CTV\", \"255,128,64\", False]\n ROI_list[\"CTVn_L2_L\"] = [\"CTV\", \"170,0,126\", False]\n ROI_list[\"CTVn_L3_L\"] = [\"CTV\", \"243,199,118\", False]\n ROI_list[\"CTVn_L4_L\"] = [\"CTV\", \"83,66,34\", False]\n ROI_list[\"CTVn_IMN_R\"] = [\"CTV\", \"0,255,255\", False]\n ROI_list[\"CTVn_interpec_R\"] = [\"CTV\", \"0,255,255\", False]\n ROI_list[\"CTVn_L1_R\"] = [\"CTV\", \"139,69,19\", False]\n ROI_list[\"CTVn_L2_R\"] = [\"CTV\", \"0,0,255\", False]\n ROI_list[\"CTVn_L3_R\"] = [\"CTV\", \"255,165,0\", False]\n ROI_list[\"CTVn_L4_R\"] = [\"CTV\", \"0,255,255\", False]\n\n if self.varSeinG_bilat.get() == \"seinG_bilat\":\n ROI_list[\"CTVp_breast_L_skin\"] = [\"CTV\", \"128,64,64\", True]\n ROI_list[\"CTVp_tumourbed_L\"] = [\"CTV\", \"139,69,19\", False]\n if self.varSeinG_bilat.get() == \"paroiG_bilat\":\n ROI_list[\"CTVp_thoracicw_L_skin\"] = [\"CTV\", \"255,0,0\", True]\n ROI_list[\"Scar_L\"] = [\"IrradiatedVolume\", \"255,0,0\", True]\n if self.varSeinD_bilat.get() == \"seinD_bilat\":\n ROI_list[\"CTVp_breast_R_skin\"] = [\"CTV\", \"139,69,19\", True]\n ROI_list[\"CTVp_tumourbed_R\"] = [\"CTV\", \"255,255,0\", False]\n if self.varSeinD_bilat.get() == \"paroiD_bilat\":\n ROI_list[\"CTVp_thoracicw_R_skin\"] = [\"CTV\", \"0,128,0\", True]\n ROI_list[\"Scar_R\"] = [\"IrradiatedVolume\", \"255,192,203\", True]\n\n if self.int_gg_IMN.get() or self.int_gg_L1.get() or self.int_gg_L2.get() or self.int_gg_L3.get() \\\n or self.int_gg_L4.get() or self.int_gg_interpec.get():\n 
ROI_list[\"BrachialPlexus_R\"] = [\"Organ\", \"255,0,255\", True]\n if self.int_gg_IMN2.get() or self.int_gg_L12.get() or self.int_gg_L22.get() or self.int_gg_L32.get() \\\n or self.int_gg_L42.get() or self.int_gg_interpec2.get():\n ROI_list[\"BrachialPlexus_L\"] = [\"Organ\", \"128,255,0\", True]\n\n # Case et patient déjà récupérer dans init -> A METTRE EN ARGUMENTS ?\n Case = get_current(\"Case\")\n Patient = get_current(\"Patient\")\n\n # Récupération de la liste de nom des CTs\n exam_list = find_exam(Case)\n # Si plusieurs noms on récupère celui que l'utilisateur a choisi\n if len(exam_list) > 1:\n exam = self.listvariable_exam.get()\n print('exam récupéré : ', exam)\n # Si un seul CT on le récupère\n if len(exam_list) == 1:\n exam = exam_list[0]\n # Si pas de CT, message d'erreur\n if len(exam_list) == 0:\n tkinter.messagebox.showerror(\"Erreur\", f\"Pas d'examen disponible dans Raystation !\")\n if LOG_FILE:\n sys.stdout = saveout\n fsock.close()\n sys.exit()\n\n ### TEST STUDY SHADOW ET STUDY INSTANCE UID #################################################################\n\n is_study_shadow = False\n is_study_instance_uid_corrupted = False\n\n # Study shadow test\n try:\n study_shadow_test = Case.Examinations[exam].GetStoredDicomTagValueForVerification(Group=0x0008,\n Element=0x0050)\n print(study_shadow_test)\n except:\n is_study_shadow = True\n\n # Study instance UID verification\n study_instance_uid = str(Case.Examinations[exam].GetStoredDicomTagValueForVerification(Group=0x0020,\n Element=0x000D))\n\n # Gets groups separated by '.'\n groups = study_instance_uid.split('.')\n # if group starts with 0 and is not '.0.', study instance uid is corrupted\n is_study_instance_uid_corrupted = any(group.startswith('0') and group != '0' for group in groups)\n\n # Message to display in pop up\n if is_study_shadow and is_study_instance_uid_corrupted:\n message = 'Attention le CT \"' + exam + '\" est un study shadow ET son study Instance UID du CT est ' \\\n 'corrompu, contactez le physicien de garde (4905).'\n elif is_study_shadow and is_study_instance_uid_corrupted == False:\n message = 'Attention, le CT \"' + exam + '\" est un study shadow, contacter le physicien de garde (4905)'\n elif is_study_shadow == False and is_study_instance_uid_corrupted:\n message = 'Attention le Study Instance UID du CT \"' + exam + '\" est corrompu, contactez le physicien ' \\\n 'de garde (4905).'\n if is_study_shadow or is_study_instance_uid_corrupted:\n print(message)\n root_pop_up = Toplevel(self.top)\n Label(root_pop_up, text=message, foreground='red', font='Calibri 12 bold').grid(row=1, column=1, padx=5,\n pady=5)\n Button(root_pop_up, text='OK', command=sys.exit, width=10).grid(row=2, column=1, padx=5, pady=5)\n root_pop_up.bind('', lambda event: sys.exit())\n root_pop_up.bind('', lambda event: sys.exit())\n root_pop_up.protocol(\"WM_DELETE_WINDOW\", sys.exit)\n self.top.mainloop()\n\n ###################################################################################################################\n\n # Avant d'importer on récupère la liste des ROIs présentent initialement dans le case (dans le cas d'une replanif\n # par exemple)\n global initial_rois_in_case\n initial_rois_in_case = [roi.Name for roi in Case.PatientModel.RegionsOfInterest]\n # Récupère le study instance UID du CT choisi\n study_to_import = Case.Examinations[exam].GetAcquisitionDataFromDicom()['StudyModule']['StudyInstanceUID']\n\n # Récupère tous les RTstruct (si plusieurs mais ne devrait pas arriver?) 
et leur chemin associés\n total_results_series, path_series = return_rtstruct(path_RS, Patient.PatientID, study_to_import)\n\n # Si aucun RT struct avec study instance uid n'a été trouvé -> message d'erreur\n if not total_results_series:\n if not (tkinter.messagebox.askokcancel(\"Avertissement\",\n \"Aucune structure dicom trouvée !\\nAucun contours Annotate ne sera \"\n \"importé.\\nCliquer sur OK pour continuer, Cancel pour arrêter\")):\n if LOG_FILE:\n sys.stdout = saveout\n fsock.close()\n sys.exit()\n\n else:\n # On enregistre avant d'importer (obligatoire) et on importe RT struct\n Patient.Save()\n import_struct(Case.CaseName, total_results_series, path_series)\n\n # Récupération de toutes les ROIs dans RS = toutes les ROIs de Annotate si pas de replanif A MODIFIER\n total_roi_Annotate = [roi.Name for roi in Case.PatientModel.RegionsOfInterest]\n buf_total_roi_Annotate = copy.deepcopy(total_roi_Annotate)\n patient_model = Case.PatientModel\n for key in dict_roi:\n if key in total_roi_Annotate:\n # Cas ou le nom de la roi existe déjà dans la liste des rois présentes avant importation (=replanif)\n if dict_roi[key] not in initial_rois_in_case:\n # Si ROI pas cochée par utilisateur ou pas dans la liste des ROIs qu'on contoure => on la supprime\n if dict_roi[key] == \"none\" or not dict_roi[key] in ROI_list:\n patient_model.RegionsOfInterest[key].DeleteRoi()\n buf_total_roi_Annotate.remove(key)\n # Si elle est cochée ou nécessaire on change son nom et on lui met le bon organtype\n else:\n patient_model.RegionsOfInterest[key].Name = dict_roi[key]\n buf_total_roi_Annotate[buf_total_roi_Annotate.index(key)] = dict_roi[key]\n\n patient_model.RegionsOfInterest[dict_roi[key]].Type = ROI_list[dict_roi[key]][0]\n if ROI_list[dict_roi[key]][0] == 'CTV':\n patient_model.RegionsOfInterest[dict_roi[key]].OrganData.OrganType = \"Target\"\n if ROI_list[dict_roi[key]][0] == 'Organ':\n patient_model.RegionsOfInterest[dict_roi[key]].OrganData.OrganType = \"OrganAtRisk\"\n if ROI_list[dict_roi[key]][0] == 'IrradiatedVolume':\n patient_model.RegionsOfInterest[dict_roi[key]].OrganData.OrganType = \"Target\"\n if ROI_list[dict_roi[key]][0] == 'Undefined':\n patient_model.RegionsOfInterest[dict_roi[key]].OrganData.OrganType = \"Other\"\n patient_model.RegionsOfInterest[dict_roi[key]].Color = ROI_list[dict_roi[key]][1]\n # Cas ou le nom de la roi n'existe pas déjà dans la liste des rois présentes avant importation\n else:\n # Si la n'est pas cochée par utilisateur ou pas dans la liste des ROIs qu'on contoure on ne fait rien\n if dict_roi[key] == \"none\" or not dict_roi[key] in ROI_list:\n pass\n # Si elle est cochée ou nécessaire, on copie sa géom dans la roi pré-existante du nom corresp anglais\n # et on supp la ROI au nom fr\n else:\n patient_model.RegionsOfInterest[dict_roi[key]].CreateAlgebraGeometry(\n Examination=Case.Examinations[exam], Algorithm=\"Auto\",\n ExpressionA={ 'Operation': \"Union\", 'SourceRoiNames': [key], 'MarginSettings': {\n 'Type': \"Expand\", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0,\n 'Right': 0, 'Left': 0}},\n ExpressionB={'Operation': \"Union\", 'SourceRoiNames': [], 'MarginSettings': {\n 'Type': \"Expand\", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0,\n 'Right': 0, 'Left': 0}},\n ResultOperation=\"None\",\n ResultMarginSettings={'Type': \"Expand\", 'Superior': 0, 'Inferior': 0, 'Anterior': 0,\n 'Posterior': 0, 'Right': 0, 'Left': 0})\n # On supprime la ROI au nom français SAUF pour larynx car c'est le même nom qu'en anglais\n # (seul ROI dans ce 
cas)\n if key != 'Larynx':\n patient_model.RegionsOfInterest[key].DeleteRoi()\n buf_total_roi_Annotate.remove(key)\n total_roi_Annotate = copy.deepcopy(buf_total_roi_Annotate)\n buf_total_roi_Annotate = []\n\n buf_total_roi_Annotate = copy.deepcopy(total_roi_Annotate)\n # On supprime toutes les structures dans RS qui ne sont pas dans les struct prévues pour ce cas (ROI_list)\n for roi in total_roi_Annotate:\n if not roi in ROI_list and not roi in initial_rois_in_case:\n patient_model.RegionsOfInterest[roi].DeleteRoi()\n buf_total_roi_Annotate.remove(roi)\n total_roi_Annotate = copy.deepcopy(buf_total_roi_Annotate)\n buf_total_roi_Annotate = []\n\n roi_dict = patient_model.RegionsOfInterest\n # S'il reste des ROIs de la list prévue (ROI_list) qui ne sont pas créées dans RS on les crée\n for name, [type, color, mandatory] in ROI_list.items():\n if not name in total_roi_Annotate and not name in initial_rois_in_case:\n roi_list_name = [roi.Name for roi in Case.PatientModel.RegionsOfInterest]\n patient_model.CreateRoi(Name=name, Color=color, Type=type, TissueName=None, RbeCellTypeName=None,\n RoiMaterial=None)\n\n # Création du contour externe\n # if 'External' not in initial_rois_in_case:\n # Case.PatientModel.CreateRoi(Name='External', Color=color, Type=type, TissueName=None, RbeCellTypeName=None,\n # RoiMaterial=None)\n roi_dict[\"External\"].CreateExternalGeometry(Examination=Case.Examinations[exam])\n if \"Scar\" in ROI_list.keys():\n if \"Scar\" not in initial_rois_in_case:\n roi_dict[\"Scar\"].OrganData.OrganType = \"Target\"\n if \"Scar_L\" in ROI_list.keys():\n if \"Scar_L\" not in initial_rois_in_case:\n roi_dict[\"Scar_L\"].OrganData.OrganType = \"Target\"\n if \"Scar_R\" in ROI_list.keys():\n if \"Scar_R\" not in initial_rois_in_case:\n roi_dict[\"Scar_R\"].OrganData.OrganType = \"Target\"\n\n # On supprime les CTVn s'ils n'ont pas été sélectionnés par l'utilisateur dans le pop up\n\n if self.varBilat.get() == \"unilat\":\n\n if not self.int_gg_IMN.get():\n del ROI_list[\"CTVn_IMN\"]\n if not 'CTVn_IMN' in initial_rois_in_case:\n roi_dict[\"CTVn_IMN\"].DeleteRoi()\n if not self.int_gg_interpec.get():\n del ROI_list[\"CTVn_interpec\"]\n if not 'CTVn_interpec' in initial_rois_in_case:\n roi_dict[\"CTVn_interpec\"].DeleteRoi()\n if not self.int_gg_L1.get():\n del ROI_list[\"CTVn_L1\"]\n if not 'CTVn_L1' in initial_rois_in_case:\n roi_dict[\"CTVn_L1\"].DeleteRoi()\n if not self.int_gg_L2.get():\n del ROI_list[\"CTVn_L2\"]\n if not 'CTVn_L2' in initial_rois_in_case:\n roi_dict[\"CTVn_L2\"].DeleteRoi()\n if not self.int_gg_L3.get():\n del ROI_list[\"CTVn_L3\"]\n if not 'CTVn_L3' in initial_rois_in_case:\n roi_dict[\"CTVn_L3\"].DeleteRoi()\n if not self.int_gg_L4.get():\n del ROI_list[\"CTVn_L4\"]\n if not 'CTVn_L4' in initial_rois_in_case:\n roi_dict[\"CTVn_L4\"].DeleteRoi()\n\n if self.varBilat.get() == \"bilat\":\n\n if not self.int_gg_IMN.get():\n del ROI_list[\"CTVn_IMN_R\"]\n if not 'CTVn_IMN_R' in initial_rois_in_case:\n roi_dict[\"CTVn_IMN_R\"].DeleteRoi()\n if not self.int_gg_interpec.get():\n del ROI_list[\"CTVn_interpec_R\"]\n if not 'CTVn_interpec_R' in initial_rois_in_case:\n roi_dict[\"CTVn_interpec_R\"].DeleteRoi()\n if not self.int_gg_L1.get():\n del ROI_list[\"CTVn_L1_R\"]\n if not 'CTVn_L1_R' in initial_rois_in_case:\n roi_dict[\"CTVn_L1_R\"].DeleteRoi()\n if not self.int_gg_L2.get():\n del ROI_list[\"CTVn_L2_R\"]\n if not 'CTVn_L2_R' in initial_rois_in_case:\n roi_dict[\"CTVn_L2_R\"].DeleteRoi()\n if not self.int_gg_L3.get():\n del ROI_list[\"CTVn_L3_R\"]\n if not 
'CTVn_L3_R' in initial_rois_in_case:\n roi_dict[\"CTVn_L3_R\"].DeleteRoi()\n if not self.int_gg_L4.get():\n del ROI_list[\"CTVn_L4_R\"]\n if not 'CTVn_L4_R' in initial_rois_in_case:\n roi_dict[\"CTVn_L4_R\"].DeleteRoi()\n\n if not self.int_gg_IMN2.get():\n del ROI_list[\"CTVn_IMN_L\"]\n if not 'CTVn_IMN_L' in initial_rois_in_case:\n roi_dict[\"CTVn_IMN_L\"].DeleteRoi()\n if not self.int_gg_interpec2.get():\n del ROI_list[\"CTVn_interpec_L\"]\n if not 'CTVn_interpec_L' in initial_rois_in_case:\n roi_dict[\"CTVn_interpec_L\"].DeleteRoi()\n if not self.int_gg_L12.get():\n del ROI_list[\"CTVn_L1_L\"]\n if not 'CTVn_L1_L' in initial_rois_in_case:\n roi_dict[\"CTVn_L1_L\"].DeleteRoi()\n if not self.int_gg_L22.get():\n del ROI_list[\"CTVn_L2_L\"]\n if not 'CTVn_L2_L' in initial_rois_in_case:\n roi_dict[\"CTVn_L2_L\"].DeleteRoi()\n if not self.int_gg_L32.get():\n del ROI_list[\"CTVn_L3_L\"]\n if not 'CTVn_L3_L' in initial_rois_in_case:\n roi_dict[\"CTVn_L3_L\"].DeleteRoi()\n if not self.int_gg_L42.get():\n del ROI_list[\"CTVn_L4_L\"]\n if not 'CTVn_L4_L' in initial_rois_in_case:\n roi_dict[\"CTVn_L4_L\"].DeleteRoi()\n\n Patient.Save()\n\n self.create_PTV_and_dosi_ROI(Case, exam, [[name, type, color, mandatory] for name, [type, color, mandatory] in\n ROI_list.items()], initial_rois_in_case)\n roi_dict_str_set = Case.PatientModel.StructureSets[exam].RoiGeometries\n\n # Pop up indiquant les ROIs créées n'ayant pas de contours et demander de réaliser les contours à l'utilisateur\n phrase = \"\"\n for name, [type, color, mandatory] in ROI_list.items():\n if not roi_dict_str_set[name].HasContours():\n phrase = phrase + f\"{name} {'(obligatoire)' if ROI_list[name][2] else ''}\" + \"\\n\" #\n\n tkinter.messagebox.showinfo(\"Information\",\n \"Vérifier les contours automatiques et réaliser les contours :\\n\" + phrase)\n\n caseName = [c.CaseName for c in Patient.Cases]\n # # Récupère le nom du dernier case créé MAIS SERT A RIEN JE CROIS, A VERIFIER\n caseName.pop()\n\n # Compte le nombre de case\n i = len(caseName) + 1\n new_caseName = f\"Case\"\n inc_temp_name = new_caseName + f\" {i}\"\n # Vérifie qu'il n'existe pas déjà un Case avec le nom \"Case X\" avec X = nombre de case\n if (new_caseName + f\" {i}\") in ''.join(caseName):\n while inc_temp_name in ''.join(caseName):\n inc_temp_name = new_caseName + f\" {i}\"\n i = i + 1\n\n # Renomme le patient avec le bon numéro de case et define le body site\n Case.CaseName = inc_temp_name\n self.define_body_site(Case)\n Patient.Save()\n if LOG_FILE:\n sys.stdout = saveout\n fsock.close()\n\n # Clique sur les onlgets de RS de manière à se mettre sur Structure Definition\n ui.TitleBar.Navigation.MenuItem[3].Click()\n ui.TitleBar.Navigation.MenuItem[3].Popup.MenuItem[1].Click()\n # ui.TitleBar.Navigation.MenuItem['Patient Modeling'].Popup.MenuItem[\"Structure Definition\"].Click()\n ui.ToolPanel.TabItem['ROIs'].Select()\n\n sys.exit()\n\n # FONCTION A METTRE AILLEURS\n def define_body_site(self, Case):\n \"\"\" Fonctions définissant le body site correspondant aux informations renseignées par l'utilisateur dans la\n pop up dans RS\"\"\"\n if self.varBilat.get() == \"unilat\":\n if self.var3DVMAT.get() == \"3D\":\n if self.varSein_unilat.get() == \"paroiG_unilat\" or self.varSein_unilat.get() == \"seinG_unilat\":\n Case.BodySite = (f\"SEIN-PAROI+/-GG G - RC3D\")\n else:\n Case.BodySite = (f\"SEIN-PAROI+/-GG D - RC3D\")\n\n if self.var3DVMAT.get() == \"TOMO\":\n if self.varSein_unilat.get() == \"paroiG_unilat\" or self.varSein_unilat.get() == \"seinG_unilat\":\n 
Case.BodySite = (f\"SEIN-PAROI+/-GG G - TOMO\")\n else:\n Case.BodySite = (f\"SEIN-PAROI+/-GG D - TOMO\")\n\n if self.var3DVMAT.get() == \"VMAT\":\n if self.varSein_unilat.get() == \"paroiG_unilat\" or self.varSein_unilat.get() == \"seinG_unilat\":\n Case.BodySite = (f\"SEIN-PAROI+/-GG G - VMAT\")\n else:\n Case.BodySite = (f\"SEIN-PAROI+/-GG D - VMAT\")\n\n if self.var3DVMAT.get() == \"IMRT\":\n if self.varSein_unilat.get() == \"paroiG_unilat\" or self.varSein_unilat.get() == \"seinG_unilat\":\n Case.BodySite = (f\"SEIN-PAROI+/-GG G - IMRT\")\n else:\n Case.BodySite = (f\"SEIN-PAROI+/-GG D - IMRT\")\n\n if self.varBilat.get() == \"bilat\":\n if self.var3DVMAT.get() == \"3D\":\n Case.BodySite = (f\"SEIN-PAROI+/-GG BILAT - RC3D\")\n if self.var3DVMAT.get() == \"TOMO\":\n Case.BodySite = (f\"SEIN-PAROI+/-GG BILAT - TOMO\")\n if self.var3DVMAT.get() == \"VMAT\":\n Case.BodySite = (f\"SEIN-PAROI+/-GG BILAT - VMAT\")\n if self.var3DVMAT.get() == \"IMRT\":\n Case.BodySite = (f\"SEIN-PAROI+/-GG BILAT - IMRT\")\n\n # Fonction à mettre avant (hors de la main window ou dedans plus haut)\n def create_PTV_and_dosi_ROI(self, Case, exam, roi_list, initial_rois_in_case):\n \"\"\"Fonction créant les volumes suivants :\n :Lungs\n :CTVp_breast/CTVp_breast_R et CTVp_breast_L/CTVp_thoracicw/CTVp_thoracicw_R et CTVp_thoracicw_L\n :PTVp_breast/PTVp_breast_R et PTVp_breast_L/PTVp_thoracicw/PTVp_thoracicw_L et PTVp_thoracicw_R\n :PTVp_tumourbed/PTVp_tumourbed_R et PTVp_tumourbed_L\n :CTVn_Ltot/CTVn_Ltot_R et CTVn_Ltot_L\n :PTVn_Ltot/PTVn_Ltot_R et PTVn_Ltot_L\n :PTVn_IMN/PTVn_IMN_L et PTVn_IMN_R\n :PTV TOT\"\"\"\n roi_dict = Case.PatientModel.RegionsOfInterest\n patient_model = Case.PatientModel\n def set_algebra(roi_dict, expression_a, expression_b, result_operation, margin_a, margin_b, margin_result,\n resulting_roi_name):\n ExpressionA = {'Operation': \"Union\", 'SourceRoiNames': expression_a,\n 'MarginSettings': {'Type': \"Expand\", 'Superior': margin_a, 'Inferior': margin_a,\n 'Anterior': margin_a,\n 'Posterior': margin_a, 'Right': margin_a, 'Left': margin_a}}\n ExpressionB = {'Operation': \"Union\", 'SourceRoiNames': expression_b,\n 'MarginSettings': {'Type': \"Contract\", 'Superior': margin_b, 'Inferior': margin_b,\n 'Anterior': margin_b,\n 'Posterior': margin_b, 'Right': margin_b, 'Left': margin_b}}\n ResultOperation = result_operation\n ResultMarginSettings = {'Type': \"Contract\", 'Superior': margin_result, 'Inferior': margin_result,\n 'Anterior': margin_result,\n 'Posterior': margin_result, 'Right': margin_result, 'Left': margin_result}\n roi_dict[resulting_roi_name].SetAlgebraExpression(ExpressionA=ExpressionA, ExpressionB=ExpressionB,\n ResultOperation=ResultOperation,\n ResultMarginSettings=ResultMarginSettings)\n return\n\n # Création du volume Lungs\n # S'il n'existe pas dans les ROIs présentes avant le lancement du script -> on le crée et on lui affecte une\n # géométrie\n if 'Lungs' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"Lungs\", Color=\"170,0,255\", Type=\"Organ\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n if self.varBilat.get() == \"bilat\":\n expression_a = [\"Lung_R\", \"Lung_L\"]\n else:\n expression_a = [\"Lung_ipsilat\", \"Lung_contra\"]\n\n set_algebra(roi_dict, expression_a, [], 'None', 0, 0, 0, 'Lungs')\n\n # S'il existe déjà dans les ROIs présentes av lancement du script (=replanif) on ne fait que l'update en\n # faisant confiance que le booléen a bien été créé la première fois (on est obligé car les structures sont déjà\n # approuvées dans le 
cas d'une replanif et on ne peut pas les modifier dans cet état)\n roi_dict[\"Lungs\"].UpdateDerivedGeometry(Examination=Case.Examinations[exam], Algorithm=\"Auto\")\n\n # Création du volume CTVp_breast ou CTVp_thoracicw à partir du CTVp_breast_skin ou CTVp_thoracicw_skin\n\n dict_color = {\"CTVp_breast_skin\": \"255,128,255\", \"CTVp_thoracicw_skin\": \"0,255,255\",\n \"CTVp_breast_L_skin\": \"255,128,255\", \"CTVp_breast_R_skin\": \"0,255,255\",\n \"CTVp_thoracicw_L_skin\": \"0,0,255\", \"CTVp_thoracicw_R_skin\": \"255,0,255\"}\n\n for ctv_skin in [c for c in\n [\"CTVp_breast_skin\", \"CTVp_thoracicw_skin\", \"CTVp_breast_L_skin\", \"CTVp_breast_R_skin\",\n \"CTVp_thoracicw_L_skin\", \"CTVp_thoracicw_R_skin\"] if c in [r[0] for r in roi_list]]:\n if ctv_skin[:-5] not in initial_rois_in_case:\n patient_model.CreateRoi(Name=ctv_skin[:-5], Color=dict_color[ctv_skin], Type=\"CTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n if ctv_skin[-6] == 'L':\n marge = float(self.listvariable_margin2.get())\n else:\n marge = float(self.listvariable_margin.get())\n set_algebra(roi_dict, [ctv_skin], ['External'], 'Intersection', 0, marge, 0, ctv_skin[:-5])\n roi_dict[ctv_skin[:-5]].UpdateDerivedGeometry(Examination=Case.Examinations[exam], Algorithm=\"Auto\")\n\n if self.varBilat.get() == \"unilat\":\n # list PTV_tot servira à ajouter toutes les ROIs nécessaires à la construction de la ROI PTV TOT\n PTV_tot = []\n\n if self.varSein_unilat.get() == \"seinG_unilat\" or self.varSein_unilat.get() == \"seinD_unilat\":\n roiBreast = \"CTVp_breast\"\n else:\n roiBreast = \"CTVp_thoracicw\"\n\n # Création du PTV sein/paroi à partir du CTV sein/paroi\n ptv_name = \"PTVp_\" + roiBreast[5:]\n if ptv_name not in initial_rois_in_case:\n patient_model.CreateRoi(Name=ptv_name, Color=\"64,128,128\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, [\"CTVp_\" + roiBreast[5:]], ['External'], 'Intersection', 0.5,\n float(self.listvariable_margin.get()), 0, ptv_name)\n PTV_tot.append(\"PTVp_\" + roiBreast[5:])\n roi_dict[ptv_name].UpdateDerivedGeometry(Examination=Case.Examinations[exam], Algorithm=\"Auto\")\n\n # Création du PTV boost à partir du CTV boost\n if \"CTVp_tumourbed\" in [r[0] for r in roi_list]:\n if 'PTVp_tumourbed' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVp_tumourbed\", Color=\"128,255,0\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVp_tumourbed'], ['PTVp_breast'], 'Intersection', 1, 0, 0,\n 'PTVp_tumourbed')\n roi_dict['PTVp_tumourbed'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n\n # Création de la ROI CTVn_Ltot\n # Recherche du nombre de GG à ajouter à la ROI\n roi_to_add = []\n if 'CTVn_L1' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L1\")\n\n if 'CTVn_L2' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L2\")\n\n if 'CTVn_L3' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L3\")\n\n if 'CTVn_L4' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L4\")\n\n if 'CTVn_interpec' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_interpec\")\n\n if (roi_to_add):\n if 'CTVn_Ltot' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"CTVn_Ltot\", Color=\"170,0,126\", Type=\"CTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, roi_to_add, [], 'None', 0, 0, 0, 'CTVn_Ltot')\n 
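# always refresh the derived geometry: in a replan the ROI already exists and may only be updated, not recreated\n                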
roi_dict['CTVn_Ltot'].UpdateDerivedGeometry(Examination=Case.Examinations[exam], Algorithm=\"Auto\")\n # Création de la ROI PTVn_Ltot\n if 'PTVn_Ltot' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVn_Ltot\", Color=\"0,0,255\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVn_Ltot'], ['External'], 'Intersection', 0.5,\n float(self.listvariable_margin.get()), 0, 'PTVn_Ltot')\n PTV_tot.append(\"PTVn_Ltot\")\n roi_dict['PTVn_Ltot'].UpdateDerivedGeometry(Examination=Case.Examinations[exam], Algorithm=\"Auto\")\n # Création de la ROI PTVn_IMN\n if 'CTVn_IMN' in [r[0] for r in roi_list]:\n if 'PTVn_IMN' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVn_IMN\", Color=\"128,255,255\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVn_IMN'], ['External'], 'Intersection', 0.5,\n float(self.listvariable_margin.get()), 0, 'PTVn_IMN')\n roi_dict['PTVn_IMN'].UpdateDerivedGeometry(Examination=Case.Examinations[exam], Algorithm=\"Auto\")\n PTV_tot.append(\"PTVn_IMN\")\n\n # Création de la ROI PTV TOT\n if 'PTV TOT' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTV TOT\", Color=\"0,0,64\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, PTV_tot, [], 'None', 0, 0, 0, 'PTV TOT')\n roi_dict['PTV TOT'].UpdateDerivedGeometry(Examination=Case.Examinations[exam], Algorithm=\"Auto\")\n\n if self.varBilat.get() == \"bilat\":\n PTV_tot = []\n\n # Création de la ROI PTVp_thoracicw_L\n if self.varSeinG_bilat.get() == \"paroiG_bilat\":\n if 'PTVp_thoracicw_L' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVp_thoracicw_L\", Color=\"0,0,128\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVp_thoracicw_L'], ['External'], 'Intersection', 0.5,\n float(self.listvariable_margin2.get()), 0, 'PTVp_thoracicw_L')\n PTV_tot.append(\"PTVp_thoracicw_L\")\n roi_dict['PTVp_thoracicw_L'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n\n # Création de la ROI CTVp_thoracicw_L\n if self.varSeinD_bilat.get() == \"paroiD_bilat\":\n if 'PTVp_thoracicw_R' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVp_thoracicw_R\", Color=\"0,0,255\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVp_thoracicw_R'], ['External'], 'Intersection', 0.5,\n float(self.listvariable_margin.get()), 0, 'PTVp_thoracicw_R')\n PTV_tot.append(\"PTVp_thoracicw_R\")\n roi_dict['PTVp_thoracicw_R'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n\n # Création de la ROI PTVp_breast_L\n if self.varSeinG_bilat.get() == \"seinG_bilat\":\n if 'PTVp_breast_L' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVp_breast_L\", Color=\"64,128,128\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVp_breast_L'], ['External'], 'Intersection', 0.5,\n float(self.listvariable_margin2.get()), 0, 'PTVp_breast_L')\n PTV_tot.append(\"PTVp_breast_L\")\n roi_dict['PTVp_breast_L'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n # Création de la ROI PTVp_tumourbed_L\n if \"CTVp_tumourbed_L\" in [r[0] for r in roi_list]:\n if 'PTVp_tumourbed_L' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVp_tumourbed_L\", Color=\"128,255,0\", Type=\"PTV\", TissueName=None,\n 
RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVp_tumourbed_L'], ['PTVp_breast_L'], 'Intersection', 1, 0, 0,\n 'PTVp_tumourbed_L')\n roi_dict['PTVp_tumourbed_L'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n\n # Création de la ROI PTVp_breast_R\n if self.varSeinD_bilat.get() == \"seinD_bilat\":\n if 'PTVp_breast_R' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVp_breast_R\", Color=\"139,69,19\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVp_breast_R'], ['External'], 'Intersection', 0.5,\n float(self.listvariable_margin.get()), 0, 'PTVp_breast_R')\n PTV_tot.append(\"PTVp_breast_R\")\n roi_dict['PTVp_breast_R'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n\n # Création de la ROI PTVp_tumourbed_R\n if \"CTVp_tumourbed_R\" in [r[0] for r in roi_list]:\n if 'PTVp_tumourbed_R' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVp_tumourbed_R\", Color=\"255,192,203\", Type=\"PTV\",\n TissueName=None, RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVp_tumourbed_R'], ['PTVp_breast_R'], 'Intersection', 1, 0,\n 0, 'PTVp_tumourbed_R')\n roi_dict['PTVp_tumourbed_R'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n # Création de la ROI CTVn_Ltot_L\n # Recherche du nombre de GG à ajouter à la ROI à G\n roi_to_add = []\n if 'CTVn_L1_L' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L1_L\")\n\n if 'CTVn_L2_L' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L2_L\")\n\n if 'CTVn_L3_L' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L3_L\")\n\n if 'CTVn_L4_L' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L4_L\")\n\n if 'CTVn_interpec_L' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_interpec_L\")\n\n if (roi_to_add):\n if 'CTVn_Ltot_L' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"CTVn_Ltot_L\", Color=\"170,0,126\", Type=\"CTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, roi_to_add, [], 'None', 0, 0, 0, 'CTVn_Ltot_L')\n roi_dict['CTVn_Ltot_L'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n if 'PTVn_Ltot_L' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVn_Ltot_L\", Color=\"170,0,126\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVn_Ltot_L'], [], 'None', 0.5, 0, 0, 'PTVn_Ltot_L')\n PTV_tot.append(\"PTVn_Ltot_L\")\n roi_dict['PTVn_Ltot_L'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n\n # Création de la ROI PTVn_IMN_L\n if 'CTVn_IMN_L' in [r[0] for r in roi_list]:\n if 'PTVn_IMN_L' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVn_IMN_L\", Color=\"128,255,255\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVn_IMN_L'], [], 'None', 0.5, 0, 0, 'PTVn_IMN_L')\n PTV_tot.append(\"PTVn_IMN_L\")\n roi_dict['PTVn_IMN_L'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n\n # Création de la ROI CTVn_Ltot_R\n # Recherche du nombre de GG à ajouter à la ROI à R\n roi_to_add = []\n if 'CTVn_L1_R' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L1_R\")\n\n if 'CTVn_L2_R' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L2_R\")\n\n if 'CTVn_L3_R' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L3_R\")\n\n if 'CTVn_L4_R' in 
[r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_L4_R\")\n\n if 'CTVn_interpec_R' in [r[0] for r in roi_list]:\n roi_to_add.append(\"CTVn_interpec_R\")\n\n if (roi_to_add):\n if 'CTVn_Ltot_R' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"CTVn_Ltot_R\", Color=\"255,0,255\", Type=\"CTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, roi_to_add, [], 'None', 0, 0, 0, 'CTVn_Ltot_R')\n roi_dict['CTVn_Ltot_R'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n # Création de la ROI CTVn_Ltot_G\n if 'PTVn_Ltot_R' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVn_Ltot_R\", Color=\"0,128,0\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVn_Ltot_R'], [], 'None', 0.5, 0, 0, 'PTVn_Ltot_R')\n PTV_tot.append(\"PTVn_Ltot_R\")\n roi_dict['PTVn_Ltot_R'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n\n if 'CTVn_IMN_R' in [r[0] for r in roi_list]:\n if 'PTVn_IMN_R' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTVn_IMN_R\", Color=\"0,128,0\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['CTVn_IMN_R'], [], 'None', 0.5, 0, 0, 'PTVn_IMN_R')\n PTV_tot.append(\"PTVn_IMN_R\")\n roi_dict['PTVn_IMN_R'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n\n # Création de la ROI PTV TOT\n if 'PTV TOT' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=\"PTV TOT\", Color=\"0,0,64\", Type=\"PTV\", TissueName=None,\n RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, PTV_tot, [], 'None', 0, 0, 0, 'PTV TOT')\n roi_dict['PTV TOT'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n # Création de la ROI External - PTV TOT\n if 'External - PTV TOT' not in initial_rois_in_case:\n patient_model.CreateRoi(Name=r\"External - PTV TOT\", Color=\"Yellow\", Type=\"Undefined\",\n TissueName=None, RbeCellTypeName=None, RoiMaterial=None)\n set_algebra(roi_dict, ['External'], ['PTV TOT'], 'Subtraction', 0, 0, 0, 'External - PTV TOT')\n roi_dict['External - PTV TOT'].UpdateDerivedGeometry(Examination=Case.Examinations[exam],\n Algorithm=\"Auto\")\n def verif_has_contours(self, Case, ROI_list, exam):\n for roi in ROI_list:\n if not Case.PatientModel.StructureSets[exam].RoiGeometries[roi[0]].HasContours() and roi[3]:\n tkinter.messagebox.showerror(\"Erreur\", f\"Le contours {roi[0]} est obligatoire !\")\n return False\n return True\n\n def onClickRadio(self):\n \"\"\"Fonction modifiant la marge par défaut du/des menus déroulants (selon si unilat ou bilat) en fonction de la\n technique\"\"\"\n if self.var3DVMAT.get() == \"3D\":\n self.OptionMenuMargin[\"menu\"].delete(0, \"end\")\n self.OptionMenuMargin2[\"menu\"].delete(0, \"end\")\n margin_list = [\"0.3\", \"0.5\"]\n margin_list2 = [\"0.3\", \"0.5\"]\n self.listvariable_margin.set(margin_list[1])\n self.listvariable_margin2.set(margin_list2[1])\n for value, value2 in zip(margin_list, margin_list2):\n self.OptionMenuMargin[\"menu\"].add_command(label=value,\n command=lambda v=value: self.listvariable_margin.set(v))\n self.OptionMenuMargin2[\"menu\"].add_command(label=value2,\n command=lambda v=value2: self.listvariable_margin2.set(v))\n\n if self.var3DVMAT.get() == \"VMAT\":\n self.OptionMenuMargin[\"menu\"].delete(0, \"end\")\n self.OptionMenuMargin2[\"menu\"].delete(0, \"end\")\n margin_list = [\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\"]\n 
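# margin_list2 feeds the second drop-down (left-side PTV margin, shown only in the bilateral case)\n            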
margin_list2 = [\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\"]\n self.listvariable_margin.set(margin_list[2])\n self.listvariable_margin2.set(margin_list2[2])\n for value, value2 in zip(margin_list, margin_list2):\n self.OptionMenuMargin[\"menu\"].add_command(label=value,\n command=lambda v=value: self.listvariable_margin.set(v))\n self.OptionMenuMargin2[\"menu\"].add_command(label=value2,\n command=lambda v=value2: self.listvariable_margin2.set(v))\n\n if self.var3DVMAT.get() == \"TOMO\":\n self.OptionMenuMargin[\"menu\"].delete(0, \"end\")\n self.OptionMenuMargin2[\"menu\"].delete(0, \"end\")\n margin_list = [\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\"]\n margin_list2 = [\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\"]\n self.listvariable_margin.set(margin_list[2])\n self.listvariable_margin2.set(margin_list2[2])\n for value, value2 in zip(margin_list, margin_list2):\n self.OptionMenuMargin[\"menu\"].add_command(label=value,\n command=lambda v=value: self.listvariable_margin.set(v))\n self.OptionMenuMargin2[\"menu\"].add_command(label=value2,\n command=lambda v=value2: self.listvariable_margin2.set(v))\n\n def set_unilat(self):\n \"\"\" Fonction enlevant les radiobuttons et checkbuttons non utiles quand on selectionne unilat :\n :radiobuttons sein droit bi, paroi droite bi, sein gauche bi et paroi gauche bi\n :2e menu déroulant pour marges\n :checkbuttons pour CMI, GG, interpec\n et positionnant les radiobuttons utiles quand on sélection unilat :\n :radiobuttons sein droit uni, sein gauche uni, paroi droite uni et paroi gauche uni\"\"\"\n self.varSein_unilat.set(\" \")\n self.varSeinG_bilat.set(\" \")\n self.varSeinD_bilat.set(\" \")\n\n self.radio_seinD_bilat.grid_forget()\n self.radio_paroiD_bilat.grid_forget()\n self.radio_seinG_bilat.grid_forget()\n self.radio_paroiG_bilat.grid_forget()\n self.label_Margin2.grid_forget()\n self.OptionMenuMargin2.grid_forget()\n\n self.gg_IMN2.grid_forget()\n self.gg_L12.grid_forget()\n self.gg_L22.grid_forget()\n self.gg_L32.grid_forget()\n self.gg_L42.grid_forget()\n self.gg_interpec2.grid_forget()\n\n self.radio_seinD_unilat.grid(row=4, column=0)\n self.radio_paroiD_unilat.grid(row=4, column=1)\n self.radio_seinG_unilat.grid(row=5, column=0)\n self.radio_paroiG_unilat.grid(row=5, column=1)\n\n def set_bilat(self):\n\n self.varSein_unilat.set(\" \")\n self.varSeinG_bilat.set(\" \")\n self.varSeinD_bilat.set(\" \")\n\n self.radio_seinD_unilat.grid_forget()\n self.radio_paroiD_unilat.grid_forget()\n self.radio_seinG_unilat.grid_forget()\n self.radio_paroiG_unilat.grid_forget()\n\n self.radio_seinD_bilat.grid(row=4, column=0)\n self.radio_paroiD_bilat.grid(row=4, column=1)\n self.radio_seinG_bilat.grid(row=5, column=0)\n self.radio_paroiG_bilat.grid(row=5, column=1)\n\n self.gg_IMN2.grid(row=8, column=0)\n self.gg_L12.grid(row=8, column=1)\n self.gg_L22.grid(row=8, column=2)\n self.gg_L32.grid(row=8, column=3)\n self.gg_L42.grid(row=8, column=4)\n self.gg_interpec2.grid(row=8, column=5)\n\n self.label_Margin2.grid(row=4, column=2)\n self.OptionMenuMargin2.grid(row=4, column=3)\n\n def __init__(self):\n\n self.top = Tk()\n\n self.top.title(\"Traitement automatique en sénologie\")\n\n self.top.resizable(0, 0)\n\n\n if DEBUG:\n tkinter.messagebox.showinfo(\"Information - DEBUG\",\n \"Mode débuggage, les RTStructs Annotate ne seront pas supprimés du répertoire d'importation de Raystation\")\n\n # Initialisation des variables associées à chaque radiobutton\n\n # Variable partagée entre les techniques (3D, rIMRT/VMAT/TOMO)\n self.var3DVMAT = StringVar()\n 
self.var3DVMAT.set(\" \")\n # Variable partagée entre les localisations (unilat/bilat)\n self.varBilat = StringVar()\n self.varBilat.set(\" \")\n # Variable partagée entre le type (Sein D/Sein G/Paroi D/Paroi G) pour unilat\n self.varSein_unilat = StringVar()\n self.varSein_unilat.set(\" \")\n # Variable partagée entre les types (Sein D/Paroi D) pour bilat\n self.varSeinG_bilat = StringVar()\n self.varSeinG_bilat.set(\" \")\n # Variable partagée entre les types (Sein G/Paroi G) pour bilat\n self.varSeinD_bilat = StringVar()\n self.varSeinD_bilat.set(\" \")\n\n # Création des radiobuttons pour technique, localisations et type\n\n # Lorsqu'on sélectionne le radiobutton pour la technique => ajustement des marges par défaut avec onClickRadio()\n self.radio_3D = Radiobutton(self.top, variable=self.var3DVMAT, text=\"3D/rIMRT\", value=\"3D\",\n command=self.onClickRadio)\n self.radio_VMAT = Radiobutton(self.top, variable=self.var3DVMAT, text=\"VMAT\", value=\"VMAT\",\n command=self.onClickRadio)\n self.radio_TOMO = Radiobutton(self.top, variable=self.var3DVMAT, text=\"TOMO\", value=\"TOMO\",\n command=self.onClickRadio)\n\n # Lorsqu'on sélectionne unilat/bilat -> ajustement des radiobuttons et checkbuttons concernés dans la fenêtre\n # avec set_unilat/set_bilat\n self.radio_unilat = Radiobutton(self.top, variable=self.varBilat, text=\"unilat\", value=\"unilat\",\n command=self.set_unilat)\n self.radio_bilat = Radiobutton(self.top, variable=self.varBilat, text=\"bilat\", value=\"bilat\",\n command=self.set_bilat)\n\n # Radiobuttons pour la latéralité et le type dans le cas unilat\n self.radio_seinD_unilat = Radiobutton(self.top, variable=self.varSein_unilat, text=\"sein droit uni\",\n value=\"seinD_unilat\")\n self.radio_paroiD_unilat = Radiobutton(self.top, variable=self.varSein_unilat, text=\"paroi droite uni\",\n value=\"paroiD_unilat\")\n self.radio_seinG_unilat = Radiobutton(self.top, variable=self.varSein_unilat, text=\"sein gauche uni\",\n value=\"seinG_unilat\")\n self.radio_paroiG_unilat = Radiobutton(self.top, variable=self.varSein_unilat, text=\"paroi gauche uni\",\n value=\"paroiG_unilat\")\n\n # Radiobuttons pour la latéralité et le type dans le cas bilat\n self.radio_seinD_bilat = Radiobutton(self.top, variable=self.varSeinD_bilat, text=\"sein droit bi\",\n value=\"seinD_bilat\")\n self.radio_paroiD_bilat = Radiobutton(self.top, variable=self.varSeinD_bilat, text=\"paroi droite bi\",\n value=\"paroiD_bilat\")\n\n self.radio_seinG_bilat = Radiobutton(self.top, variable=self.varSeinG_bilat, text=\"sein gauche bi\",\n value=\"seinG_bilat\")\n self.radio_paroiG_bilat = Radiobutton(self.top, variable=self.varSeinG_bilat, text=\"paroi gauche bi\",\n value=\"paroiG_bilat\")\n\n # Initialisation des variables associées à chaque checkbutton\n\n # Variables pour CMI, GG et interpec dans le cas unilat\n self.int_gg_IMN = IntVar()\n self.int_gg_L1 = IntVar()\n self.int_gg_L2 = IntVar()\n self.int_gg_L3 = IntVar()\n self.int_gg_L4 = IntVar()\n self.int_gg_interpec = IntVar()\n # Variables en doublon CMI, GG et interpec dans le cas bilat\n self.int_gg_IMN2 = IntVar()\n self.int_gg_L12 = IntVar()\n self.int_gg_L22 = IntVar()\n self.int_gg_L32 = IntVar()\n self.int_gg_L42 = IntVar()\n self.int_gg_interpec2 = IntVar()\n\n # Création des checkbuttons pour CMI, GG et interpec\n\n self.gg_IMN = Checkbutton(self.top, text=\"CMI\", variable=self.int_gg_IMN)\n self.gg_L1 = Checkbutton(self.top, text=\"L1\", variable=self.int_gg_L1)\n self.gg_L2 = Checkbutton(self.top, text=\"L2\", 
variable=self.int_gg_L2)\n self.gg_L3 = Checkbutton(self.top, text=\"L3\", variable=self.int_gg_L3)\n self.gg_L4 = Checkbutton(self.top, text=\"L4\", variable=self.int_gg_L4)\n self.gg_interpec = Checkbutton(self.top, text=\"Interpec\", variable=self.int_gg_interpec)\n\n # Création des checkbuttons pour CMI, GG et interpec en doublons dans le cas bilt\n\n self.gg_IMN2 = Checkbutton(self.top, text=\"CMI PTV G\", variable=self.int_gg_IMN2)\n self.gg_L12 = Checkbutton(self.top, text=\"L1 PTV G\", variable=self.int_gg_L12)\n self.gg_L22 = Checkbutton(self.top, text=\"L2 PTV G\", variable=self.int_gg_L22)\n self.gg_L32 = Checkbutton(self.top, text=\"L3 PTV G\", variable=self.int_gg_L32)\n self.gg_L42 = Checkbutton(self.top, text=\"L4 PTV G\", variable=self.int_gg_L42)\n self.gg_interpec2 = Checkbutton(self.top, text=\"Interpec PTV G\", variable=self.int_gg_interpec2)\n\n # Si bouton OK cliqué => vérifie que toutes les infos ont bien été renseignées et lance la fn create_struct()\n self.buttonOK = Button(self.top, text=\"OK\", command=self.verif_radiobutton)\n self.buttonAnnuler = Button(self.top, text=\"Annuler\", command=self.annulation)\n\n # Positionnement des radiobuttons et checkbuttons dans la pop up\n\n self.radio_3D.grid(row=2, column=0)\n self.radio_VMAT.grid(row=2, column=1)\n self.radio_TOMO.grid(row=2, column=2)\n self.radio_unilat.grid(row=3, column=0)\n self.radio_bilat.grid(row=3, column=1)\n\n self.radio_seinD_unilat.grid(row=4, column=0)\n self.radio_paroiD_unilat.grid(row=4, column=1)\n self.radio_seinG_unilat.grid(row=5, column=0)\n self.radio_paroiG_unilat.grid(row=5, column=1)\n\n self.gg_IMN.grid(row=7, column=0)\n self.gg_L1.grid(row=7, column=1)\n self.gg_L2.grid(row=7, column=2)\n self.gg_L3.grid(row=7, column=3)\n self.gg_L4.grid(row=7, column=4)\n self.gg_interpec.grid(row=7, column=5)\n\n Case = get_current(\"Case\")\n # Récupération de la liste des noms des CTs du Case\n exam_list = find_exam(Case)\n patient = get_current(\"Patient\")\n initial_rois_in_case = None\n patient.Save()\n\n phrase_case = \"#############\\nScript Seno Version : \" + str(VersionScript) + \"\\nVersion Raystation : \" + str(\n patient.ModificationInfo.SoftwareVersion) + \"\\nDate : \" + str(Date_et_Heure) + \"\\nUtilisateur : \" + str(\n patient.ModificationInfo.UserName)\n Case.Comments = phrase_case\n\n # S'il y a plus d'un CT : menu déroulant pour choisir sur lequel contourer\n self.label_exam = Label(self.top, text=\"CT à contourer : \", foreground='red', font='Calibri 12 bold')\n # self.label_exam.grid(row=1, column=1)\n self.listvariable_exam = ttk.Combobox(self.top, values=exam_list, width=40,\n state='readonly')\n self.listvariable_exam.grid(row=1, column=2, pady=10)\n\n\n # Création d'un menu déroulant pour la marge souhaitée\n self.label_Margin = Label(self.top, text=\"Retrait du PTV par rapport à la peau : \")\n self.listvariable_margin = StringVar()\n margin_list = [\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\"]\n self.listvariable_margin.set(margin_list[2]) # Par défaut marge 0.3\n self.OptionMenuMargin = OptionMenu(self.top, self.listvariable_margin, *margin_list)\n\n # Création 2e menu déroulant pour la marge souhaitée dans le cas bilat\n self.label_Margin2 = Label(self.top, text=\"Retrait du PTV G par rapport à la peau : \")\n self.listvariable_margin2 = StringVar()\n margin_list2 = [\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\"]\n self.listvariable_margin2.set(margin_list2[2])\n self.OptionMenuMargin2 = OptionMenu(self.top, self.listvariable_margin2, *margin_list2)\n\n # 
Positionnement menu déroulant list exam, margin et boutons OK et annuler\n # MANQUE POSITIONNEMENT MENU DEROULANT EXAMINATIONS\n self.label_exam.grid(row=1, column=1)\n self.label_Margin.grid(row=4, column=2)\n self.OptionMenuMargin.grid(row=4, column=3)\n\n self.buttonOK.grid(row=10, column=0, columnspan=2)\n self.buttonAnnuler.grid(row=10, column=1)\n\n self.top.mainloop()\n\nmain_window()\n\n\n","repo_name":"cmilew/script_physics","sub_path":"Patient Modeling/Contour breast ROIs.py","file_name":"Contour breast ROIs.py","file_ext":"py","file_size_in_byte":71165,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20263675339","text":"'''\nSupervised training framework of speaker verification\nTraining data: labeled source domain data (Vox2) + pseudo-labeled target domain data (CN1)\nECAPA-TDNN\nLoss: AAM-softmax\n'''\n\nfrom omegaconf import OmegaConf\nimport os\nfrom model import ECAPA_TDNN\nfrom wav_dset import wav_dset\nimport torch\nfrom trainer import Trainer\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n# load config file\ncfg = OmegaConf.load('./config.yaml')\n# create folders\ncheckpoint_save_path = os.path.join(cfg.save_path, 'checkpoint')\nlogger_save_path = os.path.join(cfg.save_path, 'logger')\nos.makedirs(checkpoint_save_path, exist_ok=True)\nos.makedirs(logger_save_path, exist_ok=True)\n\n# initialise\nmodel= ECAPA_TDNN(C=1024, n_class=cfg.n_class, m=cfg.m, s=cfg.s)\ntrain_dataset = wav_dset(cfg)\ntrain_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=cfg.batch_size,\n shuffle=True,\n num_workers=cfg.n_thread,\n drop_last=True,\n )\noptim = torch.optim.Adam(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay)\nscheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=cfg.step_size, gamma=cfg.lr_decay)\n\ntrainer = Trainer(cfg,\n model=model,\n train_loader=train_loader,\n optim=optim,\n scheduler=scheduler,\n )\n\n# start training\ntrainer.train()\n","repo_name":"Maohq97/Cluster-GuidedUDA","sub_path":"3-Supervised_training/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"203578607","text":"from cgi import print_form\nfrom pprint import pprint\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.messagebox import showinfo\nfrom tkinter import filedialog as fd\nimport pandas as pd\nimport pyodbc\nimport openpyxl\n\n# root window\nroot = tk.Tk()\nroot.geometry('300x200')\nroot.resizable(False, False)\nroot.title('Importador de datos')\n\ndef select_file():\n \n filetypes = (\n ('excel files','*.csv'),\n ('excel files','*.xlsx'),\n ('text files', '*.txt'),\n ('All files', '*.*')\n )\n\n #abre explorador de archivos\n filename = fd.askopenfilename(\n title='Open a file',\n initialdir='/',\n filetypes=filetypes)\n\n #mensajito de archivo seleccionado\n showinfo(\n title='Selected File',\n message=filename\n )\n return filename\n\n# download button\ndef download_clicked():\n \n arch = select_file()\n\n\n # Importar excel/CSV\n data = pd.read_excel (arch) \n df = pd.DataFrame(data)\n #crea boton que al apretarlo ejecute todo este script siguiente\n # Connectar a SQL Server\n conn = pyodbc.connect('Driver={SQL Server};'\n 'Server=DESKTOP-1PSC1H3\\SQLEXPRESS;'\n 'Database=Alumnos;'\n 'Trusted_Connection=yes;')\n cursor = conn.cursor()\n # Crea la Tabla (No es necesario porque la tabla ya va a estar creada)\n # cursor.execute('''\n # CREATE TABLE fichas (\n 
# DNI nvarchar(255) primary key,\n    # Nombre_Alumno nvarchar(255),\n    # Celular nvarchar(255),\n    # Mail nvarchar(255),\n    # Fecha_de_Nacimiento nvarchar(255),\n    # Ciudad_de_Residencia nvarchar(255),\n    ## Pais_de_Residencia nvarchar(255),\n    # Ciudad_de_Nacimiento nvarchar(255),\n    # Provincia_de_Nacimiento nvarchar(255),\n    # Pais_de_Nacimiento nvarchar(255),\n    # Estado_Civil nvarchar(255),\n    # Trabaja nvarchar(255),\n    # Obra_Social nvarchar(255),\n    # Nombre_Obra_Social nvarchar(255)\n    # )\n    # '''\n    # )\n    # build a dataframe from whatever the Excel file contains and insert it into the table\n    for row in df.itertuples():\n        \n        cursor.execute('''\n                INSERT INTO fichas (DNI, Nombre_Alumno, Domicilio, Celular, Mail, Fecha_de_Nacimiento, Ciudad_de_Residencia, Provincia_de_Residencia, Pais_de_Residencia, Ciudad_de_Nacimiento, Provincia_de_Nacimiento, Pais_de_Nacimiento, Estado_Civil, Trabaja, Obra_Social, Nombre_Obra_Social)\n                VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\n                ''',\n                str(row.DNI), \n                str(row.Nombre_Alumno),\n                str(row.Domicilio),\n                str(row.Celular),\n                str(row.Mail),\n                str(row.Fecha_de_Nacimiento),\n                str(row.Ciudad_de_Residencia),\n                str(row.Provincia_de_Residencia),\n                str(row.Pais_de_Residencia),\n                str(row.Ciudad_de_Nacimiento),\n                str(row.Provincia_de_Nacimiento),\n                str(row.Pais_de_Nacimiento),\n                str(row.Estado_Civil),\n                str(row.Trabaja),\n                str(row.Obra_Social),\n                str(row.Nombre_Obra_Social)\n                )\n        showinfo(\n        title='Valor Columna', \n        message=str(row.Nombre_Obra_Social)\n        )\n\n    conn.commit()\n    \n    cursor.execute('''EXECUTE('sp_reemplaza_nan')''')\n\n    conn.commit()\n    \n    conn.close()\n    \n    showinfo(\n        title='Information',\n        message='TRANSFERENCIA EXITOSA'\n    )\n\n\n# on click\ndownload_button = ttk.Button(\n    root,\n    command=download_clicked\n)\ndownload_button.config(text=\"Importar archivo\")\n\n# position and size\ndownload_button.pack(\n    ipadx=5,\n    ipady=5,\n    expand=True\n)\n\n# start the main loop\nroot.mainloop()\n","repo_name":"Gallinita10/Programa_importar","sub_path":"Importador de tabla.py","file_name":"Importador de tabla.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"72320414026","text":"#!/usr/bin/env python\n\"\"\"\n.. module:: state_machine\n\t:platform: Unix\n\t:synopsis: Python module for the Finite State Machine\n \n.. moduleauthor:: Francesco Ferrazzi \n\nROS node for the second assignment of the Experimental Robotics course of the Robotics Engineering\nMaster program. The software architecture initializes a Finite State Machine which controls \nthe behavior of a surveillance robot. \nThe scenario involves a robot deployed in an indoor environment for surveillance purposes.\nThe robot's objective is to visit different locations, which are rooms and corridors, and stay there \nfor some time. The robot starts in location E, which is the charging location, and waits until it receives \nthe information needed to build the topological map. The robot moves to a new location and performs a surveillance\ntask before it checks another location. This behavior is repeated until the program is shut down.\nWhen the robot's battery is low, it goes to the charging location and waits for some time before resuming \nthe behavior just described. 
When the robot's battery is not low, it should move among locations \nwith the following policy:\n1) It should mainly stay in corridors.\n2) If a reachable room has not been visited for a fixed time, the room becomes urgent and the robot visits it.\nThe subscriptions, publishers, services, and service actions are defined and used in the helper node of\nthe finite state machine, called state_machine_helper.py.\n\t\t\n\"\"\"\n\n# Import libraries\nimport roslib\nimport rospy\nimport smach\nimport smach_ros\nimport time\nimport random\nfrom std_msgs.msg import String, Float64, Bool, Float32\n\n# Import the class that decouples the interface of the Finite State Machine with\n# the other nodes of the architecture from the actual implementation of the\n# Finite State Machine, which is available in this file.\nfrom exprob_assignment2.state_machine_helper import Helper\n\n# Import constant name defined to structure the architecture.\nfrom exprob_assignment2 import architecture_name_mapper as anm\n\n# The list of names that identify the states of the Finite State Machine.\nSTATE_CHARGE = 'CHARGE'    # State where the robot recharges its battery.\nSTATE_BUILD_WORLD = 'BUILDWORLD'   # State where the environment is built using information from the aruco markers.\nSTATE_REASONER = 'REASONER'   # State that decides the next location that will be visited.\nSTATE_MOTION = 'MOTION'   # State that allows the robot to move in the environment.\nSTATE_REACH_CHARGE = 'REACHCHARGE'  # State used to let the robot reach the charging station.\nSTATE_SURVEILLANCE = 'SURVEILLANCE'  # State that checks the location in which the robot stops.\n\n\n# The list of names that identify the transitions of the Finite State Machine.\nTRANS_BATTERY_LOW = 'battery_low'   # The transition from the `REASONER`, 'MOTION' and `SURVEILLANCE` states toward the `REACHCHARGE` state.\nTRANS_BATTERY_OK = 'battery_ok'    # The transition from the `CHARGE` state to the `REASONER` state.\nTRANS_CHECK_LOC = 'check_loc'   # The transition from the `MOTION` state to the `SURVEILLANCE` state.\nTRANS_INFO_DONE = 'info_done'    # The transition from the `REASONER` state to the `MOTION` state.\nTRANS_WORLD_DONE = 'world_done'   # The transition from the `BUILDWORLD` state to the 'REASONER' state.\nTRANS_CHARGE_ON = 'charge_on'   # The transition from the 'REACHCHARGE' state toward the `CHARGE` state.\nTRANS_CHECK_DONE = 'check_done'  # The transition from the 'SURVEILLANCE' state toward the `REASONER` state.\n\n\n# Initialize and define the tag for identifying the log producer.\nLOG_TAG = anm.NODE_STATE_MACHINE\n\n\nclass BuildWorld(smach.State):\n\t\"\"\" \n\tClass that defines the state: BUILDWORLD.\n\t\t\n\n\t\"\"\"\n\tdef __init__(self, helper):\n\t\t\"\"\" \n\t\tMethod that initializes the state BUILDWORLD.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\thelper: instance of the class Helper() allocated in state_machine_helper.py\n\n\t\t\"\"\"\n\t\tsmach.State.__init__(self, outcomes = [TRANS_BATTERY_LOW, TRANS_BATTERY_OK, TRANS_CHECK_LOC, TRANS_INFO_DONE, TRANS_WORLD_DONE, TRANS_CHARGE_ON, TRANS_CHECK_DONE])\n\t\tself._helper = helper\n\t\t\t\t\t\t\t\t \n\tdef execute(self, userdata):\n\t\t\"\"\" \n\t\tMethod which is executed before exiting the state BUILDWORLD. This method generates the \n\t\tenvironment. First of all, information from the aruco markers is retrieved, looping until\n\t\tall of them are detected (7 markers). 
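The execute() methods in this record all share one poll-under-mutex pattern; a minimal standalone sketch of it, using plain threading and a stand-in Helper rather than the project's state_machine_helper:

```python
import threading
import time

class Helper:
    """Stand-in for state_machine_helper.Helper: a flag guarded by a mutex."""
    def __init__(self):
        self.mutex = threading.Lock()
        self._world_done = False

    def build_environment(self):
        time.sleep(0.1)          # pretend to query Armor / build the ontology
        with self.mutex:
            self._world_done = True

    def world_done(self):
        return self._world_done

helper = Helper()
threading.Thread(target=helper.build_environment).start()
while True:                      # same acquire/try/finally loop as BuildWorld.execute
    helper.mutex.acquire()
    try:
        if helper.world_done():
            break
    finally:
        helper.mutex.release()
    time.sleep(0.05)
print("world built -> return TRANS_WORLD_DONE")
```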
Once they have been detected, the build_environment()\n\t\tmethod is called to load the information into the provided ontology using the Armor server.\n\t\tWhen the environment is built, the transition to the next state occurs.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\tuserdata: shared variable between the states of the Finite State Machine\n\n\t\tReturns:\n\t\t\tTRANS_WORLD_DONE: is the transition to go from the BUILDWORLD state to the REASONER state.\n\t\t\n\t\t\"\"\"\n\t\tlog_msg = f'\\n\\n################# Executing state BUILD WORLD #################\\n'\n\t\trospy.loginfo(anm.tag_log(log_msg, LOG_TAG)) \n\t\tr = rospy.Rate(10)\n\t\twhile not self._helper.aruco_done():\n\t\t\t# Wait until all arucos have been detected before building the world\n\t\t\tr.sleep()\n\t\t# Build the world once the markers have been detected\n\t\tself._helper.build_environment() \n\t\twhile not rospy.is_shutdown():\n\t\t\tself._helper.mutex.acquire()\n\t\t\ttry:\n\t\t\t\tif self._helper.world_done():\n\t\t\t\t\treturn TRANS_WORLD_DONE\n\t\t\tfinally:\n\t\t\t\t\tself._helper.mutex.release()\n\t\t\t\t\t\n\n\t\t\t\t\t\nclass Charge(smach.State):\n\t\"\"\" \n\tClass that defines the state: CHARGE.\n\t\t\n\n\t\"\"\"\n\tdef __init__(self, helper):\n\t\t\"\"\" \n\t\tMethod that initializes the state CHARGE.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\thelper: instance of the class Helper() allocated in state_machine_helper.py\n\n\t\t\"\"\"\n\t\tsmach.State.__init__(self, outcomes = [TRANS_BATTERY_LOW, TRANS_BATTERY_OK, TRANS_CHECK_LOC, TRANS_INFO_DONE, TRANS_WORLD_DONE, TRANS_CHARGE_ON, TRANS_CHECK_DONE])\n\t\tself._helper = helper\n\n\tdef execute(self, userdata):\n\t\t\"\"\" \n\t\tMethod which is executed before exiting the state CHARGE. This method makes the robot \n\t\tcharge itself, relying on the method recharge_srv() defined in the helper node.\n\t\tWhen the battery is charged, the transition to the next state occurs.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\tuserdata: shared variable between the states of the Finite State Machine\n\n\t\tReturns:\n\t\t\tTRANS_BATTERY_OK: is the transition to go from the CHARGE state to the REASONER state.\n\t\t\n\t\t\"\"\"\n\t\tlog_msg = f'\\n\\n################# Executing state CHARGE #################\\n'\n\t\trospy.loginfo(anm.tag_log(log_msg, LOG_TAG))\n\t\tself._helper.recharge_srv()\n\t\twhile not rospy.is_shutdown():\n\t\t\tself._helper.mutex.acquire()\n\t\t\ttry:\n\t\t\t\tif not self._helper.ret_battery_low():\n\t\t\t\t\treturn TRANS_BATTERY_OK\n\t\t\tfinally:\n\t\t\t\tself._helper.mutex.release()\t\t\t\n\n\t\t\t\nclass ReachCharge(smach.State):\n\t\"\"\" \n\tClass that defines the state: REACHCHARGE.\n\t\t\n\n\t\"\"\"\n\tdef __init__(self, helper):\n\t\t\"\"\" \n\t\tMethod that initializes the state REACHCHARGE.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\thelper: instance of the class Helper() allocated in state_machine_helper.py\n\n\t\t\"\"\"\n\t\tsmach.State.__init__(self, outcomes = [TRANS_BATTERY_LOW, TRANS_BATTERY_OK, TRANS_CHECK_LOC, TRANS_INFO_DONE, TRANS_WORLD_DONE, TRANS_CHARGE_ON, TRANS_CHECK_DONE])\n\t\tself._helper = helper\n\t\t\t\n\tdef execute(self, userdata):\n\t\t\"\"\" \n\t\tMethod which is executed before exiting the state REACHCHARGE. This method makes the \n\t\trobot go to the charging location 'E' by calling the method go_to_charge() defined in \n\t\tthe helper node. 
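Each state class here subclasses smach.State in the same way; as a minimal illustration of how such states are wired into a container (compare main() further below), assuming a working SMACH installation:

```python
import smach

class Step(smach.State):
    """Trivial state that always succeeds."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['done'])

    def execute(self, userdata):
        return 'done'

sm = smach.StateMachine(outcomes=['finished'])
with sm:
    # each transition maps an outcome of a state to the next state (or a final outcome)
    smach.StateMachine.add('FIRST', Step(), transitions={'done': 'SECOND'})
    smach.StateMachine.add('SECOND', Step(), transitions={'done': 'finished'})

print(sm.execute())  # -> 'finished'
```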
\n\t\tWhen the robot reaches the charging location, the transition to the next state occurs.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\tuserdata: shared variable between the states of the Finite State Machine\n\n\t\tReturns:\n\t\t\tTRANS_CHARGE_ON: is the transition to go from the REACHCHARGE state to the CHARGE state.\n\t\t\n\t\t\"\"\"\n\t\tlog_msg = f'\\n\\n################# Executing state REACH CHARGE #################\\n'\n\t\trospy.loginfo(anm.tag_log(log_msg, LOG_TAG))\n\t\tself._helper.go_to_charge()\n\t\twhile not rospy.is_shutdown():\n\t\t\tself._helper.mutex.acquire()\n\t\t\ttry:\n\t\t\t\tif self._helper.charge_ready():\n\t\t\t\t\treturn TRANS_CHARGE_ON\n\t\t\tfinally:\n\t\t\t\tself._helper.mutex.release()\n\t\t\n\n\nclass Reasoner(smach.State):\n\t\"\"\" \n\tClass that defines the state: REASONER.\n\t\t\n\n\t\"\"\"\n\tdef __init__(self, helper):\n\t\t\"\"\" \n\t\tFunction that initializes the state REASONER.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\thelper: instance of the class Helper() allocated in state_machine_helper.py\n\n\t\t\"\"\"\n\t\tsmach.State.__init__(self, outcomes = [TRANS_BATTERY_LOW, TRANS_BATTERY_OK, TRANS_CHECK_LOC, TRANS_INFO_DONE, TRANS_WORLD_DONE, TRANS_CHARGE_ON, TRANS_CHECK_DONE])\n\t\tself._helper = helper\n\t\t\t\n\tdef execute(self, userdata):\n\t\t\"\"\" \n\t\tMethod which is executed before exiting the state REASONER. This function makes the robot\n\t\treason in order to achieve the desired behavior of the surveillance robot, by calling the\n\t\tmethod reason() defined in the helper node. When the robot finishes querying the ontology, \n\t\tthe power level of the battery is checked. If the battery gets low during execution, the next \n\t\tstate to be executed will be REACHCHARGE; otherwise the MOTION state will be executed.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\tuserdata: shared variable between the states of the Finite State Machine\n\n\t\tReturns:\n\t\t\tTRANS_BATTERY_LOW: is the transition to go from the REASONER state to the REACHCHARGE state.\n\t\t\tTRANS_INFO_DONE: is the transition to go from the REASONER state to the MOTION state.\n\t\t\n\t\t\"\"\"\n\t\tlog_msg = f'\\n\\n################# Executing state REASONER #################\\n'\n\t\trospy.loginfo(anm.tag_log(log_msg, LOG_TAG))\n\t\tgoal_location = self._helper.reason()\n\t\tlog_msg = f'NEXT GOAL: {goal_location}\\n\\n'\n\t\trospy.loginfo(anm.tag_log(log_msg, LOG_TAG))\n\t\twhile not rospy.is_shutdown():\n\t\t\tself._helper.mutex.acquire()\n\t\t\ttry:\n\t\t\t\tif self._helper.ret_battery_low():\n\t\t\t\t\tself._helper.cancel_motion()\n\t\t\t\t\treturn TRANS_BATTERY_LOW\n\t\t\t\tif self._helper.reason_done():\n\t\t\t\t\treturn TRANS_INFO_DONE\n\t\t\tfinally:\n\t\t\t\tself._helper.mutex.release() \t\t\n\n\n\nclass Motion(smach.State):\n\t\"\"\" \n\tClass that defines the state: MOTION.\n\t\t\n\n\t\"\"\"\n\tdef __init__(self, helper):\n\t\t\"\"\" \n\t\tFunction that initializes the state MOTION.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\thelper: instance of the class Helper() allocated in state_machine_helper.py\n\n\t\t\"\"\"\n\t\tsmach.State.__init__(self, outcomes = [TRANS_BATTERY_LOW, TRANS_BATTERY_OK, TRANS_CHECK_LOC, TRANS_INFO_DONE, TRANS_WORLD_DONE, TRANS_CHARGE_ON, TRANS_CHECK_DONE])\n\t\tself._helper = helper \n\t\t\t\n\tdef execute(self, userdata):\n\t\t\"\"\" \n\t\tFunction which is executed before exiting the state MOTION. 
This method makes the robot\n\t\tplan a path from its current position to the desired position retrieved by the \n\t\tREASONER. It also checks that the robot follows the planned path for the whole duration\n\t\tof the motion, so the robot travels autonomously from the current position to the goal.\n\t\tThis is possible thanks to a SLAM algorithm which localizes the robot in the environment and maps\n\t\tthe surroundings. \n\t\tThe action is completed when the robot reaches the target. If the battery gets low during execution, \n\t\tthe next state to be executed will be REACHCHARGE; otherwise the SURVEILLANCE state will be executed.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\tuserdata: shared variable between the states of the Finite State Machine\n\n\t\tReturns:\n\t\t\tTRANS_BATTERY_LOW: is the transition to go from the MOTION state to the REACHCHARGE state.\n\t\t\tTRANS_CHECK_LOC: is the transition to go from the MOTION state to the SURVEILLANCE state.\n\t\t\n\t\t\"\"\"\n\t\tlog_msg = f'\\n\\n################# Executing state MOTION #################\\n'\n\t\trospy.loginfo(anm.tag_log(log_msg, LOG_TAG))\n\t\tself._helper.go_to_goal()\n\t\twhile not rospy.is_shutdown():\n\t\t\tself._helper.mutex.acquire()\n\t\t\ttry:\n\t\t\t\tself._helper.check_motion()\n\t\t\t\tif self._helper.ret_battery_low():\n\t\t\t\t\tself._helper.cancel_motion()\n\t\t\t\t\treturn TRANS_BATTERY_LOW\n\t\t\t\tif self._helper.motion_done():\n\t\t\t\t\treturn TRANS_CHECK_LOC\n\t\t\tfinally:\n\t\t\t\tself._helper.mutex.release()\n\n\n\nclass Surveillance(smach.State):\n\t\"\"\" \n\tClass that defines the state: SURVEILLANCE.\n\t\t\n\n\t\"\"\"\n\tdef __init__(self, helper):\n\t\t\"\"\" \n\t\tMethod that initializes the state SURVEILLANCE.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\thelper: instance of the class Helper() allocated in state_machine_helper.py\n\n\t\t\"\"\"\n\t\tsmach.State.__init__(self, outcomes = [TRANS_BATTERY_LOW, TRANS_BATTERY_OK, TRANS_CHECK_LOC, TRANS_INFO_DONE, TRANS_WORLD_DONE, TRANS_CHARGE_ON, TRANS_CHECK_DONE])\n\t\tself._helper = helper \n\t\t\n\tdef execute(self, userdata):\n\t\t\"\"\" \n\t\tMethod which is executed before exiting the state SURVEILLANCE. This method simulates a\n\t\tsurveillance task when the robot arrives at a specific location.\n\t\tIt makes the base joint of the arm rotate 360 degrees about its axis, while the camera\n\t\tplaced on top of the arm acquires images. 
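The 360-degree sweep itself is implemented inside the helper's do_surveillance(); purely as a sketch of the idea, a base-joint rotation could be commanded as below, where the controller topic name is a hypothetical stand-in, not taken from this package:

```python
import math
import rospy
from std_msgs.msg import Float64

rospy.init_node('surveillance_sweep_sketch')
# hypothetical position-controller topic; the real one is defined elsewhere in the package
pub = rospy.Publisher('/robot/joint1_position_controller/command', Float64, queue_size=1)
rate = rospy.Rate(10)
angle = 0.0
while not rospy.is_shutdown() and angle < 2 * math.pi:
    pub.publish(Float64(angle))  # step the base joint towards a full turn
    angle += 0.1
    rate.sleep()
```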
Meanwhile, the state of the battery is checked.\n\t\tIf the battery gets low during execution, the next state to be executed will be REACHCHARGE; \n\t\totherwise the REASONER state will be executed.\n\t\t\n\t\tArgs:\n\t\t\tself: instance of the current class.\n\t\t\tuserdata: shared variable between the states of the Finite State Machine\n\n\t\tReturns:\n\t\t\tTRANS_BATTERY_LOW: is the transition to go from the SURVEILLANCE state to the REACHCHARGE state.\n\t\t\tTRANS_CHECK_DONE: is the transition to go from the SURVEILLANCE state to the REASONER state.\n\t\t\n\t\t\"\"\"\n\t\tlog_msg = f'\\n\\n################# Executing state SURVEILLANCE #################\\n'\n\t\trospy.loginfo(anm.tag_log(log_msg, LOG_TAG))\n\t\tself._helper.do_surveillance()\n\t\twhile not rospy.is_shutdown():\n\t\t\tself._helper.mutex.acquire()\n\t\t\ttry:\n\t\t\t\tif self._helper.ret_battery_low():\n\t\t\t\t\tself._helper.cancel_motion()\n\t\t\t\t\treturn TRANS_BATTERY_LOW\n\t\t\t\tif self._helper.surveillance_done():\n\t\t\t\t\treturn TRANS_CHECK_DONE\n\t\t\tfinally:\n\t\t\t\tself._helper.mutex.release()\n\n\n\t\t\t\ndef main():\n\t\"\"\"\n\tThis method initializes the Finite State Machine of the node state_machine.py using the SMACH\n\tmodules. Some documentation can be found online at the following link: `smach `_.\n \tEvery state of the node relies on the node state_machine_helper.py; an instance of the\n \tHelper() class defined in state_machine_helper.py is passed to every state of the FSM.\n \t\n \t\"\"\"\n\trospy.init_node(anm.NODE_STATE_MACHINE, log_level=rospy.INFO)\n\t\n\thelper = Helper()\n\t\n\t# Create a SMACH state machine\n\tsm = smach.StateMachine(outcomes=['container_interface_surv'])\n\tsm.userdata.sm_counter = 0\n\n\t# Open the container\n\twith sm:\n\t\t# Add states to the container\n\t\tsmach.StateMachine.add(STATE_BUILD_WORLD, BuildWorld(helper),\n\t\t\t\t\t\t\ttransitions={TRANS_BATTERY_LOW:STATE_BUILD_WORLD,\n\t\t\t\t\t\t\t\t     TRANS_CHARGE_ON:STATE_BUILD_WORLD, \n\t\t\t\t\t\t\t\t     TRANS_BATTERY_OK:STATE_BUILD_WORLD,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_LOC:STATE_BUILD_WORLD,\n\t\t\t\t\t\t\t\t     TRANS_INFO_DONE:STATE_BUILD_WORLD,\n\t\t\t\t\t\t\t\t     TRANS_WORLD_DONE:STATE_REASONER,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_DONE:STATE_BUILD_WORLD})\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tsmach.StateMachine.add(STATE_CHARGE, Charge(helper), \n\t\t\t\t\t\t\ttransitions={TRANS_BATTERY_LOW:STATE_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_CHARGE_ON:STATE_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_BATTERY_OK:STATE_REASONER,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_LOC:STATE_CHARGE,\n\t\t\t\t\t\t\t     TRANS_INFO_DONE:STATE_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_WORLD_DONE:STATE_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_DONE:STATE_CHARGE})\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tsmach.StateMachine.add(STATE_REASONER, Reasoner(helper), \n\t\t\t\t\t\t\ttransitions={TRANS_BATTERY_LOW:STATE_REACH_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_CHARGE_ON:STATE_REASONER, \n\t\t\t\t\t\t\t\t     TRANS_BATTERY_OK:STATE_REASONER,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_LOC:STATE_REASONER,\n\t\t\t\t\t\t\t\t     TRANS_INFO_DONE:STATE_MOTION,\n\t\t\t\t\t\t\t\t     TRANS_WORLD_DONE:STATE_REASONER,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_DONE:STATE_REASONER})\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tsmach.StateMachine.add(STATE_MOTION, Motion(helper), \n\t\t\t\t\t\t\ttransitions={TRANS_BATTERY_LOW:STATE_REACH_CHARGE, \n\t\t\t\t\t\t\t\t     TRANS_CHARGE_ON:STATE_MOTION,\n\t\t\t\t\t\t\t     TRANS_BATTERY_OK:STATE_MOTION,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_LOC:STATE_SURVEILLANCE,\n\t\t\t\t\t\t\t\t     
TRANS_INFO_DONE:STATE_MOTION,\n\t\t\t\t\t\t\t\t     TRANS_WORLD_DONE:STATE_MOTION,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_DONE:STATE_MOTION})\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tsmach.StateMachine.add(STATE_REACH_CHARGE, ReachCharge(helper), \n\t\t\t\t\t\t\ttransitions={TRANS_BATTERY_LOW:STATE_REACH_CHARGE, \n\t\t\t\t\t\t\t\t     TRANS_CHARGE_ON:STATE_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_BATTERY_OK:STATE_REACH_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_LOC:STATE_REACH_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_INFO_DONE:STATE_REACH_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_WORLD_DONE:STATE_REACH_CHARGE,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_DONE:STATE_REACH_CHARGE})\n\t\t\t\t\t\t\n\t\tsmach.StateMachine.add(STATE_SURVEILLANCE, Surveillance(helper), \n\t\t\t\t\t\t\ttransitions={TRANS_BATTERY_LOW:STATE_REACH_CHARGE, \n\t\t\t\t\t\t\t\t     TRANS_CHARGE_ON:STATE_SURVEILLANCE,\n\t\t\t\t\t\t\t\t     TRANS_BATTERY_OK:STATE_SURVEILLANCE,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_LOC:STATE_SURVEILLANCE,\n\t\t\t\t\t\t\t\t     TRANS_INFO_DONE:STATE_SURVEILLANCE,\n\t\t\t\t\t\t\t\t     TRANS_WORLD_DONE:STATE_SURVEILLANCE,\n\t\t\t\t\t\t\t\t     TRANS_CHECK_DONE:STATE_REASONER})\n\t\t\t\t\t\t\t\t\t\t \n\t# Create and start the introspection server for visualization\n\tsis = smach_ros.IntrospectionServer('server_surv', sm, '/SM_ROOT_SURV')\n\tsis.start()\n\n\t# Execute the state machine\n\toutcome = sm.execute()\n\n\t# Wait for ctrl-c to stop the application\n\trospy.spin()\n\tsis.stop()\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"FraFerrazzi/exprob_assignment2","sub_path":"scripts/state_machine.py","file_name":"state_machine.py","file_ext":"py","file_size_in_byte":17602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"70948735946","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import transforms as trs\n\nregisters_file = input('Name of the file with registers: ')\nusers_file = input('Name of the file with users: ')\nadded = input('Name of the file with added to live: ')\ncheckins = input('Name of the file with checkins: ')\n\nregisters = pd.read_csv('{}.csv'.format(registers_file))\nusers = pd.read_csv('{}.csv'.format(users_file))\nadded = pd.read_csv('{}.csv'.format(added))\ncheckins = pd.read_csv('{}.csv'.format(checkins))\n\nregisters['startTime'] = pd.to_datetime(registers['startTime'])\nregisters['completionTime'] = pd.to_datetime(registers['completionTime'])\n\ncutoff = []\nfor i, r in registers.iterrows():\n    if(pd.isnull(registers.loc[i, 'startTime']) | pd.isnull(registers.loc[i, 'completionTime'])):\n        cutoff.append(pd.NaT)\n    else:\n        diff = registers.loc[i, 'completionTime'] - registers.loc[i, 'startTime']\n        cutoff.append(diff)\nregisters['cutoffTime'] = cutoff\nregisters.drop(['userIdId', 'courseIdId', 'cutOffTime'],\n    inplace = True, axis = 1)\n\n\ntrav = added['userId'].to_list()\nlivestreams = []\nfor q in registers['userId'].to_list():\n    temp = 0\n    for qq in trav:\n        if(qq == q):\n            temp += 1\n    livestreams.append(temp)\n\nuserId = registers.userId.to_list()\ncourseId = registers.courseId.to_list()\nnickname = []\nemail = []\nfirst_name = []\nlast_name = []\nmobile = []\nmobileHW = []\nmobileOS = []\ngender = []\nfor i in userId:\n    for index, row in users.iterrows():\n        if(i == users.loc[index,'id']):\n            nickname.append(users.loc[index, 'nickname'])\n            email.append(users.loc[index, 'email'])\n            
first_name.append(users.loc[index, 'firstName'])\n last_name.append(users.loc[index, 'lastName'])\n mobile.append(users.loc[index, 'mobileNumber'])\n mobileHW.append(users.loc[index, 'mobileHW'])\n mobileOS.append(users.loc[index, 'mobileOS'])\n\n\nregisters['livestreams'] = livestreams\nregisters['nickname'] = nickname\nregisters['email'] = email\nregisters['first_name'] = first_name\nregisters['last_name'] = last_name\nregisters['mobile'] = mobile\nregisters['mobileHW'] = mobileHW\nregisters['mobileOS'] = mobileOS\n\ndef change(x):\n x = x.split('-')[0]\n return x\ntemp = registers['genderAgeGroup'].to_list()\ngender = list(map(change, temp))\n\nregisters.insert(3, 'gender', gender)\n\nmedals = registers.copy()\nmedals = medals[medals['racerStatus'] == 'FINISH']\nmedals.drop(['id','userId', 'registerTime', 'racerStatus', 'startTime', 'completionTime',\n'livestreams', 'email', 'mobile', 'mobileHW', 'mobileOS'],\n inplace = True, axis = 1)\n\nmedals = medals.sort_values(by=['cutoffTime']).reset_index(drop = True)\nranking = []\nran = 0\nfor X in range(medals.shape[0]):\n ran+=1\n ranking.append(ran)\nmedals.insert(1, 'overallRanking', ranking)\ntemp = medals.pop('nickname')\nmedals.insert(2, 'nickname', temp)\n\nmale_rank = 0\nfemale_rank = 0\ngender_ranking = []\nfor ind3, r3 in medals.iterrows():\n if(medals.loc[ind3, 'gender'] == 'Male'):\n male_rank += 1\n gender_ranking.append(male_rank)\n else:\n female_rank += 1\n gender_ranking.append(female_rank)\n\nmedals.insert(5, 'genderRanking', gender_ranking)\n\nage_groups = list(set(medals['genderAgeGroup']))\nage_group_ran = [0] * len(age_groups)\nageGroupRanking = []\nfor ind4, r4 in medals.iterrows():\n age_group_ran[age_groups.index(medals.loc[ind4, 'genderAgeGroup'])] += 1\n ageGroupRanking.append(age_group_ran[age_groups.index(medals.loc[ind4, 'genderAgeGroup'])])\nmedals.insert(7, 'genderAgeGroup_Ranking', ageGroupRanking)\n\ntemp = medals.pop('cutoffTime')\nmedals.insert(2, 'Finish Time', temp)\ntemp = medals.pop('first_name')\nmedals.insert(4, 'First Name', temp)\ntemp = medals.pop('last_name')\nmedals.insert(5, 'Last Name', temp)\nmedals.columns = ['Course','Ranking','Finish Time', 'Nickname','First Name','Last Name',\n 'Gender', 'BIB Number', 'Gender Ranking', 'Gender Age Group', 'Gender Age Group Ranking']\n\nregisters['cutoffTime'] = registers['cutoffTime'].astype(str)\nmedals['Finish Time'] = medals['Finish Time'].astype(str)\nt = registers['cutoffTime'].to_list()\nq = medals['Finish Time'].to_list()\ncut_temp = []\nfor tt in t:\n if(tt == 'NaT'):\n cut_temp.append('NaT')\n else:\n tem = tt.split(' ')[2].split('.')[0]\n cut_temp.append(tem)\nregisters['cutoffTime'] = cut_temp\n\ncut_temp = []\nfor qq in q:\n if(qq != 'NaT'):\n tem = qq.split(' ')[2].split('.')[0]\n cut_temp.append(tem)\n else:\n cut_temp.append(qq)\nmedals['Finish Time'] = cut_temp\n\nmax_beacons = checkins['sequenceOrder'].max()\nregister_list = registers['id'].to_list()\nmax_name = 'ST{}'.format(checkins['sequenceOrder'].max())\nfor col in range(int(checkins['sequenceOrder'].max())):\n column_name = 'ST{}'.format(col+1)\n temp = []\n temp_ex = []\n for reg in register_list:\n flag = True\n for check, record in checkins[checkins['registerId'] == reg].iterrows():\n if(checkins.loc[check, 'sequenceOrder'] == (col+1) ):\n temp.append(checkins.loc[check, 'checkInTime'])\n temp_ex.append(1)\n flag = False\n break\n if(flag):\n temp.append(pd.NaT)\n temp_ex.append(0)\n if(col == 0):\n registers['Start ST'] = temp\n registers['Start ST-DISC'] = temp_ex\n elif(col == 
checkins['sequenceOrder'].max() - 1):\n registers['Finish ST'] = temp\n registers['Finish ST-DISC'] = temp_ex\n else:\n registers[column_name] = temp\n registers[column_name+'-DISC'] = temp_ex\n\n\n\ndes_col = list(filter(lambda k: '-DISC' in k, list(registers)))\nregisters['overall'] = registers[des_col].sum(axis=1)\nregisters['stats(%)'] = round( ( registers[des_col].sum(axis=1) / checkins['sequenceOrder'].max() ) * 100, 1)\n\nintervals = registers.copy()\nintervals = intervals[intervals['racerStatus'] == 'FINISH']\nnicknames = intervals['nickname'].to_list()\nbib = intervals['bibNumber'].to_list()\noverall1 = intervals['overall'].to_list()\nmHW1 = intervals['mobileHW'].to_list()\nmOS1 = intervals['mobileOS'].to_list()\nstats1 = intervals['stats(%)'].to_list()\ndata = {'nickname': nicknames, 'BIB Number': bib,'mobileHW': mHW1,\n'mobileOS':mOS1, 'overall': overall1, 'stats(%)': stats1}\nuser_det = pd.DataFrame(data)\n\nuser_det['interval'] = ''\nfor ind1, r1 in user_det.iterrows():\n comp = user_det.loc[ind1, 'stats(%)']\n if(comp>=0.0 and comp< 20.0): user_det.loc[ind1, 'interval'] = '0-20'\n elif(comp>= 20.0 and comp< 40.0): user_det.loc[ind1, 'interval'] = '20-40'\n elif(comp>= 40.0 and comp< 60.0): user_det.loc[ind1, 'interval'] = '40-60'\n elif(comp>= 60.0 and comp< 80.0): user_det.loc[ind1, 'interval'] = '60-80'\n else: user_det.loc[ind1, 'interval'] = '80-100'\n\nif(medals.shape[0] == 0):\n course_name = input('Enter course name: ')\n with pd.ExcelWriter('super_list{}.xlsx'.format(course_name)) as writer:\n registers.to_excel(writer, sheet_name='Full')\n user_det.to_excel(writer, sheet_name='With intervals')\nelse:\n course_name = medals.loc[0, 'Course']\n medals.to_excel('medalList_{}.xlsx'.format(course_name), index = False)\n with pd.ExcelWriter('super_list{}.xlsx'.format(course_name)) as writer:\n registers.to_excel(writer, sheet_name='Full')\n user_det.to_excel(writer, sheet_name='With intervals')\n\nflag = input('Generate graphics? y/n: ')\n\nif(flag == 'y'):\n\n des_coll = list(filter(lambda k: '-DISC' in k, list(registers)))\n des_coll.append('mobileHW')\n des_coll.append('mobileOS')\n\n flagRunners = input('Include not-finishers? 
y/n: ')\n    if(flagRunners == 'y'):\n        detection = registers[registers['racerStatus'] != 'PRESTART'].reset_index(drop = True)\n        detection = detection[detection['racerStatus'] != 'GIVEUP'].loc[:, des_coll].reset_index(drop = True)\n    else:\n        detection = registers[registers['racerStatus'] == 'FINISH'].loc[:, des_coll].reset_index(drop = True)\n\n    for ind5, r5 in detection.iterrows():\n        if('iPhone' in detection.loc[ind5, 'mobileHW']):\n            detection.loc[ind5, 'mobileHW'] = 'iOS'\n            detection.loc[ind5, 'mobileOS'] = 'iOS ' + detection.loc[ind5, 'mobileOS'].split('.')[0]\n        else:\n            detection.loc[ind5, 'mobileHW'] = 'Android'\n            detection.loc[ind5, 'mobileOS'] = detection.loc[ind5, 'mobileOS'].split('.')[0]\n\n    #registers.to_excel('super_list{}.xlsx'.format(course_name), index = False)\n    #medals.to_excel('medalList_{}.xlsx'.format(course_name), index = False)\n\n    new_col = [x.split('-')[0] for x in detection.columns.to_list()]\n    detection.columns = new_col\n\n    detection_overall = detection.copy()\n    detection_overall = detection_overall.iloc[:,:-2]\n    wr = input('Regularize? y/n ')\n    detection_ios = detection.copy()\n    detection_android = detection.copy()\n    detection_ios = detection_ios[detection_ios['mobileHW'] == 'iOS']\n    detection_ios = detection_ios.iloc[:,:-2]\n    detection_android = detection_android[detection_android['mobileHW'] == 'Android']\n    detection_android = detection_android.iloc[:,:-2]\n    if(wr == 'y'):\n        detection_overall = detection_overall.loc[:, (detection_overall != 0).any(axis=0)]\n        detection_ios = detection_ios.loc[:, (detection_ios != 0).any(axis=0)]\n        detection_android = detection_android.loc[:, (detection_android != 0).any(axis=0)]\n\n\n    detection_mean = detection_overall.sum(axis=0).sum() / detection_overall.shape[1]\n    detection_std = detection_overall.sum(axis=0).std()\n    poor_st = []\n    poor_st_rate = []\n\n    if(detection_overall.shape[1] > 80):\n        fSize = (30, 22)\n    else:\n        fSize = (22, 14)\n\n\n\n    fig, ax1 = plt.subplots( figsize=fSize, dpi = 300)\n    ax1.set_xlabel(\"Beacons\", fontsize=18)\n    ax1.set_ylabel(\"# of detections\", fontsize=18)\n    ax1.tick_params(axis='both', which='major', labelsize=13)\n    ax1.set_title('Detection rate of {} course'.format(course_name), fontsize=22, fontweight='bold')\n\n\n    p1 = ax1.bar(detection_overall.columns, detection_overall.sum(axis=0), label = 'Number of detections')\n    extra_info = ax1.scatter([],[], label = \"Performance: {}%\".format(round(detection_mean/detection_overall.shape[0]*100,1)), color = 'white')\n    avg = ax1.axhline(y=detection_mean, color = 'red', lw = 1.5, linestyle='--', label = 'Average detection rate: {}/{}'.format(round(detection_mean), detection_overall.shape[0]))\n    lines = [p1,avg, extra_info]\n    ax1.legend(lines, [l.get_label() for l in lines], bbox_to_anchor=(1.01, 1), loc='upper left', borderaxespad=0., fontsize=15)\n\n    for lbl, txt in enumerate(detection_overall.sum(axis=0)):\n        if(txt<(detection_mean - detection_std)):\n            ax1.annotate(txt, (detection_overall.columns[lbl], detection_overall.sum(axis=0)[lbl]), size=14,ha='center')\n            poor_st.append(detection_overall.columns[lbl])\n            poor_st_rate.append(detection_overall.sum(axis=0)[lbl])\n\n    plt.figtext(0.05, -0.1, \"Author: Bazarbay Alisher\"\n                + \"\\nCourse Name: {}\".format(course_name)\n                + \"\\nIntuition: On average, each ST was detected {} times OR {}%\".format(round(detection_mean), round(detection_mean/detection.shape[0]*100,1))\n                #+\"\\nST with poor detection rate: {}\".format(poor_st)##,\n                ,fontsize=20, wrap=True)\n    fig.tight_layout()\n    plt.xticks(rotation=90)\n    
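The "rate" reported on these figures is always the same quantity: average detections per beacon divided by the number of runners. A toy check with made-up data:

```python
import pandas as pd

# rows = runners, columns = beacons; 1 means the beacon was detected for that runner
detection = pd.DataFrame({'ST1': [1, 1, 0], 'ST2': [1, 0, 0], 'ST3': [1, 1, 1]})
mean_det = detection.sum(axis=0).sum() / detection.shape[1]  # (2 + 1 + 3) / 3 = 2.0
rate = round(mean_det / detection.shape[0] * 100, 1)         # 2.0 / 3 runners = 66.7%
print(mean_det, rate)
```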
fig.savefig(\"{}_detectionRate.pdf\".format(course_name), format = 'pdf', dpi=300, bbox_inches='tight')\n\n #poorBeac = input('List of poor Beacons? y/n ')\n #if(poorBeac == 'y'):\n # dfPoor = pd.DataFrame(list(zip(poor_st, poor_st_rate)), columns =['ST', 'Rate'])\n # dfPoor = pd.to_excel('poorBeacons.xlxs')\n\n\n detection_mean_ios = detection_ios.sum(axis=0).sum() / detection_ios.shape[1]\n detection_std_ios = detection_ios.sum(axis=0).std()\n\n detection_mean_android = detection_android.sum(axis=0).sum() / detection_android.shape[1]\n detection_std_android = detection_android.sum(axis=0).std()\n fig_os, axs = plt.subplots(2, figsize=(20, 18), dpi = 300)\n fig_os.suptitle('Beacon detection rate of {} course by OS'.format(course_name), fontsize=22, fontweight='bold', y = 0.95)\n axs[0].set_title('iOS', fontsize=20)\n axs[1].set_title('Android', fontsize=20)\n colormap = plt.cm.gist_ncar\n\n #rate_ios = axs[0].scatter([], [], marker = ' ',label = 'Detection: {}'.format(round( detection_mean_ios,1)))\n ios = axs[0].bar(detection_ios.columns, detection_ios.sum(axis=0), label = 'iOS', alpha = 0.5, color = colormap(0.12))\n extra_ios = axs[0].scatter([],[], label = \"Perfomace: {}%\".format(round(detection_mean_ios/detection_ios.shape[0]*100,1)), color = 'white')\n avg_ios = axs[0].axhline(y=detection_mean_ios, color = 'black', lw = 1.5, linestyle='--', label = 'Average detection rate: {}/{} '.format(round(detection_mean_ios, 1), detection_ios.shape[0]))\n #for lbl1, txt1 in enumerate(detection_ios.sum(axis=0)):\n # if(txt1<(detection_mean_ios - detection_std_ios)):\n # axs[0].annotate(txt1, (detection_ios.columns[lbl1], detection_ios.sum(axis=0)[lbl1] + 0.5), size=12,ha='center')\n lines1 = [ios,avg_ios,extra_ios]\n axs[0].legend(lines1, [l.get_label() for l in lines1], bbox_to_anchor=(1.01, 1), loc='upper left', borderaxespad=0., fontsize=15)\n\n\n android = axs[1].bar(detection_android.columns, detection_android.sum(axis=0), label = 'Android', alpha = 0.5, color = colormap(0.7))\n extra_android = axs[0].scatter([],[], label = \"Perfomace: {}%\".format(round(detection_mean_android/detection_android.shape[0]*100,1)), color = 'white')\n avg_android = axs[1].axhline(y=detection_mean_android, color = 'black', lw = 1.5, linestyle='--', label = 'Average detection rate: {}/{}'.format(round(detection_mean_android, 1), detection_android.shape[0]))\n #for lbl2, txt2 in enumerate(detection_android.sum(axis=0)):\n # if(txt2<(detection_mean_android - detection_std_android)):\n # axs[1].annotate(txt2, (detection_android.columns[lbl2], detection_android.sum(axis=0)[lbl2]+0.5), size=12,ha='center')\n lines2 = [android,avg_android,extra_android]\n axs[1].legend(lines2, [l.get_label() for l in lines2], bbox_to_anchor=(1.01, 1), loc='upper left', borderaxespad=0., fontsize=15);\n\n\n\n axs[0].set_xticklabels(detection_ios.columns, rotation=90);\n axs[1].set_xticklabels(detection_android.columns, rotation=90);\n fig_os.savefig(\"{}_detectionRate_OS.pdf\".format(course_name), format = 'pdf', dpi=300, bbox_inches='tight')\n\n os_ver = sorted(list(set(detection['mobileOS'].to_list())))\n dictOS = {}\n volume = []\n for ver in os_ver:\n dftemp = detection.copy()\n dftemp = dftemp[dftemp['mobileOS'] == ver]\n dftemp = dftemp.iloc[:,:-2]\n meanOS = dftemp.sum(axis=0).sum() / dftemp.shape[1]\n rate = round(meanOS/dftemp.shape[0]*100,3)\n dictOS[ver] = rate\n volume.append(dftemp.shape[0])\n\n keys = list(dictOS.keys());\n values = list(dictOS.values());\n\n figOS = plt.figure(figsize = (15,10), dpi=300)\n axOS = 
figOS.add_subplot(111)\n    axOS1 = axOS.twiny()\n    axOS1.set_xticks([])\n\n    axOS.set_title('{} | detection performance by OS versions'.format(course_name), fontsize=15, fontweight='bold')\n\n    for x,y,ss in zip(keys,values,volume):\n        if('iOS' in x):\n            axOS.scatter(x,y, s = ss*300, alpha=0.5, color = colormap(0.12));\n        else:\n            axOS.scatter(x,y, s = ss*300, alpha=0.5, color = colormap(0.7));\n\n        axOS.scatter([],[], label = '{} rate: {}'.format(x,round(y,1)), color = 'white');\n        axOS1.scatter([],[], label = '{} detection: {}/{}'.format(x,round(detection_overall.shape[1]*y/100), detection_overall.shape[1]), color = 'white');\n\n\n    axOS.set_ylim(min(values)-5, max(values)+5)\n\n    for size in range(0,len(dictOS)):\n        axOS.annotate(volume[size], (keys[size], values[size]), size=12,ha='center', va = 'center')\n\n\n    axOS.legend(fontsize='medium', bbox_to_anchor=(1, 1.009), loc='upper left');\n    axOS1.legend( fontsize='medium', bbox_to_anchor=(1, 0.8), loc='upper left');\n\n    figOS.savefig(\"{}_rateOSversion.pdf\".format(course_name), format = 'pdf', dpi=300, bbox_inches='tight')\n\n\n    df_runners = registers.copy()\n    df_runners=df_runners[df_runners['racerStatus'] == 'FINISH']\n    df_runners = df_runners.sort_values(by=['cutoffTime']).loc[:, des_coll].reset_index(drop = True)\n    df_runners.columns = new_col\n    df_runners = df_runners.iloc[:,:-2]\n    df_top = df_runners.iloc[:int(df_runners.shape[0]*0.2), :]\n    df_bottom = df_runners.iloc[-int(df_runners.shape[0]*0.2):, :]\n\n    if(wr == 'y'):\n        df_top = df_top.loc[:, (df_top != 0).any(axis=0)]\n        df_bottom = df_bottom.loc[:, (df_bottom != 0).any(axis=0)]\n\n    top_mean = df_top.sum(axis=0).sum() / df_top.shape[1]\n    std_top = df_top.sum(axis=0).std()\n\n    bottom_mean = df_bottom.sum(axis=0).sum() / df_bottom.shape[1]\n    bottom_std = df_bottom.sum(axis=0).std()\n\n    fig_port, axPort = plt.subplots(2, figsize=(20, 24), dpi = 400)\n    axPort[0].set_title('Top 20% by Finish Time', fontsize=20, fontweight='bold')\n    axPort[1].set_title('Bottom 20% by Finish Time', fontsize=20, fontweight='bold')\n    axPort[0].tick_params(axis='both', which='major', labelsize=12)\n    axPort[1].tick_params(axis='both', which='major', labelsize=12)\n\n    #rate_ios = axs[0].scatter([], [], marker = ' ',label = 'Detection: {}'.format(round( detection_mean_ios,1)))\n    top = axPort[0].bar(df_top.columns, df_top.sum(axis=0), label = 'Top 20% racers')\n    extra_top = axPort[0].scatter([],[], label = \"Performance: {}%\".format(round(top_mean/df_top.shape[0]*100,1)), color='white')\n    avg_top = axPort[0].axhline(y=top_mean, color = 'red', lw = 1.5, linestyle='--', label = 'Average detection rate: {}/{} '.format(round(top_mean, 1), df_top.shape[0]))\n\n    lines_top = [top,avg_top,extra_top]\n    axPort[0].legend(lines_top, [l.get_label() for l in lines_top], bbox_to_anchor=(1.01, 1), loc='upper left', borderaxespad=0., fontsize=15)\n\n\n    bottom = axPort[1].bar(df_bottom.columns, df_bottom.sum(axis=0),color = 'C1', label = 'Bottom 20% racers')\n    extra_bottom = axPort[1].scatter([],[], label = \"Performance: {}%\".format(round(bottom_mean/df_bottom.shape[0]*100,1)), color='white')\n    avg_bottom = axPort[1].axhline(y=bottom_mean, color = 'red', lw = 1.5, linestyle='--', label = 'Average detection rate: {}/{}'.format(round(bottom_mean, 1), df_bottom.shape[0]))\n\n    lines_bottom = [bottom,avg_bottom,extra_bottom]\n    axPort[1].legend(lines_bottom, [l.get_label() for l in lines_bottom], bbox_to_anchor=(1.01, 1), loc='upper left', borderaxespad=0., fontsize=15);\n\n\n\n    axPort[0].set_xticklabels(df_top.columns, rotation=90);\n    
axPort[1].set_xticklabels(df_bottom.columns, rotation=90);\n    fig_port.savefig(\"{}_rateTopBottom.pdf\".format(course_name), format = 'pdf', dpi=300, bbox_inches='tight')\n\n\n\n    top_mean = df_top.sum(axis=0).sum() / df_top.shape[1]\n    std_top = df_top.sum(axis=0).std()\n\n    bottom_mean = df_bottom.sum(axis=0).sum() / df_bottom.shape[1]\n    bottom_std = df_bottom.sum(axis=0).std()\n\n    fig_portS, axPort = plt.subplots(figsize=(15, 10), dpi = 300)\n    axPort.set_title('{} | performance by top/bottom 20% runners'.format(course_name), fontsize=15, fontweight='bold')\n\n\n    #rate_ios = axs[0].scatter([], [], marker = ' ',label = 'Detection: {}'.format(round( detection_mean_ios,1)))\n    top = axPort.plot(df_top.columns, df_top.sum(axis=0), label = 'Top 20% by Finish Time')\n    bottom = axPort.plot(df_bottom.columns, df_bottom.sum(axis=0), label = 'Bottom 20% by Finish Time')\n\n    extra_top = axPort.scatter([],[], label = \"Top runners: {}%\".format(round(top_mean/df_top.shape[0]*100,1)))\n    extra_bottom = axPort.scatter([],[], label = \"Bottom runners: {}%\".format(round(bottom_mean/df_bottom.shape[0]*100,1)))\n\n\n    lines_topbot = [extra_top,extra_bottom]\n    axPort.legend(lines_topbot, [l.get_label() for l in lines_topbot], bbox_to_anchor=(1.01, 1), loc='upper left', borderaxespad=0., fontsize=15);\n\n\n\n    axPort.set_xticklabels(df_top.columns, rotation=90);\n    fig_portS.savefig(\"{}_rateTopBottomScatter.pdf\".format(course_name), format = 'pdf', dpi=300, bbox_inches='tight')\n\n\n\n    df_brands = registers.copy()\n    df_brands = df_brands[df_brands['racerStatus'] == 'FINISH'].loc[:, des_coll].reset_index(drop = True)\n\n    for brandind, brandrow in df_brands.iterrows():\n        temp = df_brands.loc[brandind, 'mobileHW']\n        if('iPhone' in temp):\n            df_brands.loc[brandind, 'mobileHW'] = 'iPhone'\n        elif('-' in temp):\n            if('SM-' in temp): df_brands.loc[brandind, 'mobileHW'] = 'Samsung'\n            elif( (len(temp.split('-')[0]) == 3) & (len(temp.split('-')[1]) == 3) ):\n                df_brands.loc[brandind, 'mobileHW'] = 'Huawei'\n            else:\n                df_brands.loc[brandind, 'mobileHW'] = 'Others'\n        elif(('MI' in temp) | ('Mi' in temp) | ('mi' in temp)):\n            df_brands.loc[brandind, 'mobileHW'] = 'Xiaomi'\n        else:\n            df_brands.loc[brandind, 'mobileHW'] = 'Others'\n\n\n    hw_ver = sorted(list(set(df_brands['mobileHW'].to_list())))\n    dictHW = {}\n    volumeHW = []\n    for hw in hw_ver:\n        dftemp = df_brands.copy()\n        dftemp = dftemp[dftemp['mobileHW'] == hw]\n        dftemp = dftemp.iloc[:,:-2]\n        meanHW = dftemp.sum(axis=0).sum() / dftemp.shape[1]\n        rate = round(meanHW/dftemp.shape[0]*100,3)\n        dictHW[hw] = rate\n        volumeHW.append(dftemp.shape[0])\n\n    keys = list(dictHW.keys());\n    values = list(dictHW.values());\n\n    figHW = plt.figure(figsize = (12,10), dpi=300)\n    axHW = figHW.add_subplot(111)\n    axHW1 = axHW.twiny()\n    axHW1.set_xticks([])\n    axHW.set_title('{} | detection performance by brands'.format(course_name), fontsize=15, fontweight='bold')\n\n    for x,y,ss in zip(keys,values,volumeHW):\n        axHW.scatter(x,y, s = ss*300, alpha=0.5);\n        axHW.scatter([],[], label = '{} rate: {}'.format(x,round(y,1)), color = 'white');\n        axHW1.scatter([],[], label = '{} detection: {}/{}'.format(x,round(detection_overall.shape[1]*y/100), detection_overall.shape[1]), color = 'white');\n\n\n    axHW.set_ylim(min(values)-5, max(values)+5)\n\n    for size in range(0,len(dictHW)):\n        axHW.annotate(volumeHW[size], (keys[size], values[size]), size=12,ha='center', va = 'center')\n\n    axHW.legend(fontsize='medium', bbox_to_anchor=(1, 1.008), loc='upper left');\n    axHW1.legend( fontsize='medium', bbox_to_anchor=(1, 0.8), loc='upper 
left');\n\n    figHW.savefig(\"{}_rateBrand.pdf\".format(course_name), format = 'pdf', dpi=300, bbox_inches='tight')\n\n\n    fixCol, fixDet = detection_overall.columns, detection_overall.sum(axis=0)\n    fixDic = {'Beacon':fixCol, '#Detections': fixDet}\n    fixDf = pd.DataFrame(fixDic)\n    fixDf.to_excel('Beacons.xlsx')\n","repo_name":"Awekabaz/Work-Simplifiers","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":23126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"29142469659","text":"from selene import browser, by, be, have, query\nfrom selene.support.conditions import not_\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.by import By\nfrom functools import partial\nfrom selenium.common.exceptions import ElementClickInterceptedException\nfrom selene import command\nimport time\n\n\ndef get_element_active(element, next_index):\n    return int(element.get(query.text)) == next_index\n\n\ndef pegar_espelho_grupo(elemento):\n    return 'idBtnVisualizarEspelhoGrupo' in elemento.get(query.attribute('id'))\n\n\nbrowser.open('http://dgp.cnpq.br/dgp/faces/consulta/consulta_parametrizada.jsf')\n\n# Click the element with the correct ID (using double quotes in the CSS selector)\nbrowser.element(by.name(\"idFormConsultaParametrizada:buscaRefinada\")).should(be.visible).click()\nbrowser.element(by.css('.control-label')).should(be.visible)\nreg = browser.element(by.xpath('//div[@id=\"idFormConsultaParametrizada:idRegiao\"]'))\nreg.click()\nbrowser.element(by.xpath('//*[@id=\"idFormConsultaParametrizada:idRegiao_panel\"]/div/ul/li[5]')).click()\ntime.sleep(2)\nbrowser.element(by.xpath('//div[@id=\"idFormConsultaParametrizada:idUF\"]')).click()\nbrowser.element(by.xpath('//*[@id=\"idFormConsultaParametrizada:idUF_panel\"]/div/ul/li[3]')).click()\ntime.sleep(2)\nbrowser.element(by.xpath('//div[@id=\"idFormConsultaParametrizada:idInst\"]')).click()\nbrowser.element(by.xpath('//*[@id=\"idFormConsultaParametrizada:idInst_panel\"]/div/ul/li[77]')).click()\nbrowser.element(by.xpath('//*[@id=\"idFormConsultaParametrizada:idPesquisar\"]')).click()\n\ntime.sleep(4)\nbrowser.element(by.xpath('//*[@id=\"idFormConsultaParametrizada:resultadoDataList:0:idBtnVisualizarEspelhoGrupo\"]')).should(be.visible)\n\nfor index_page in range(1, 18, 1):\n    active_element = int(browser.element(by.css('.ui-state-active')).get(query.text))\n    pages = browser.all(by.css('.ui-paginator-page'))\n    fun_next_active = partial(get_element_active, next_index=active_element+1)\n    map_pages = list(filter(fun_next_active, pages))\n    links = browser.all('.controls a')\n    map_grupo_espelho = list(filter(pegar_espelho_grupo, links))\n\n    for button in map_grupo_espelho[5:]:\n        button.click()\n\n        time.sleep(4)\n        browser.switch_to_next_tab()\n        print(browser.driver.current_url)\n        browser.element(by.css('.selo-grupo')).should(be.visible)\n\n        h1 = browser.element(by.css('h1')).get(query.text)\n        h1_text = browser.element(by.xpath('//*[@id=\"idFormVisualizarGrupoPesquisa\"]/div/div[2]')).get(query.text)\n        h1_text_split = h1_text.split('espelhogrupo/')[1]\n        browser.save_page_source(f'{h1_text_split}.html')\n        browser.close_current_tab()\n        browser.switch_to_tab(0)\n        print(f'index_button: {button.get(query.text)}, index_page: {index_page}, url: {browser.driver.current_url}')\n        time.sleep(4)\n\n\n    map_pages[0].click()\n    time.sleep(4)\n    print(f'active_element: {active_element}')\n    active_element = 
int(browser.element(by.css('.ui-state-active')).get(query.text))\n time.sleep(3)\n\ninput()\n\n","repo_name":"paulonneves/scrapper-diretorios-grupos-pesquisa","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9675585083","text":"import unittest\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom SatisfactoryPlanner.Factory.Factory import Miner, Factory\nfrom SatisfactoryPlanner.Resource.Resource import IronRod, IronIngot, Screw\nfrom SatisfactoryPlanner.Resource.Ore import IronOre\nfrom SatisfactoryPlanner.Machines.Machines import Smelter, Constructor\n\n\nclass TestFactory(unittest.TestCase):\n def test_basic_functionality(self):\n factory = Factory('Test Factory')\n\n miner = Miner(1, IronOre('Impure'))\n factory.add_input(miner)\n\n # Confirm correct production (intermediate)\n self.assertEqual(miner.get_capacity(), {IronOre: 30})\n\n factory.add_machine(Smelter('Iron Ingot'))\n\n # Confirm correct production\n self.assertEqual(factory.get_capacity(), Smelter('Iron Ingot').get_production_rates())\n\n # Confirm correct consumption\n self.assertEqual(miner.get_capacity(), {IronOre: 0})\n\n def test_advanced_functionality(self):\n factory = Factory('Test Factory')\n\n # Build and test inputs and internal production for screw production\n miner = Miner(1, IronOre('Impure'))\n factory.add_input(miner)\n\n # Confirm correct production (intermediate)\n factory.add_machine(Smelter('Iron Ingot'))\n self.assertEqual(factory.get_capacity(), {IronIngot: 30})\n\n # Confirm correct production (intermediate)\n factory.add_machine(Constructor('Iron Rod'))\n self.assertEqual(factory.get_capacity(), {IronIngot: 15, IronRod: 15})\n\n factory.add_machine(Constructor('Screw'))\n\n # Confirm correct production\n self.assertEqual(factory.get_capacity(), {IronIngot: 15, IronRod: 5, Screw: 40})\n\n def test_input_miner_upgrade(self):\n factory = Factory('Test Factory')\n\n miner = Miner(1, IronOre('Impure'))\n factory.add_input(miner)\n\n # Confirm correct production (intermediate)\n self.assertEqual(miner.get_capacity(), {IronOre: 30})\n\n factory.upgrade_miners(2)\n\n # Confirm correct production\n self.assertEqual(miner.get_capacity(), {IronOre: 60})\n\n def test_get_name(self):\n factory = Factory('Test Factory')\n\n self.assertEqual(factory.get_name(), 'Test Factory')\n\n def test_get_inputs(self):\n factory = Factory('Test Factory')\n\n miner = Miner(1, IronOre('Impure'))\n factory.add_input(miner)\n\n self.assertEqual(factory.get_inputs(), [miner])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cc121/SatisfactoryPlanner","sub_path":"tests/TestFactory.py","file_name":"TestFactory.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4286084092","text":"import json\nimport pathlib\n\nfrom django.conf import settings\nfrom django.core.management import BaseCommand\nfrom halo.halo_api_client import HaloAPIClient\n\nfrom help_desk_api.models import HelpDeskCreds\n\n\nclass Command(BaseCommand):\n help = \"Get a User record from the Halo API\"\n\n def __init__(self, stdout=None, stderr=None, **kwargs):\n super().__init__(stdout, stderr, **kwargs)\n self.credentials = HelpDeskCreds.objects.get(\n zendesk_email=\"halo-only@example.com\" # /PS-IGNORE\n )\n self.client = HaloAPIClient(\n 
self.credentials.halo_client_id, self.credentials.halo_client_secret\n        )\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"userid\", type=int, help=\"Halo ID of User\", default=38, nargs=\"?\"\n        )  # Default: me! :-)\n        parser.add_argument(\"-o\", \"--output\", type=pathlib.Path)\n\n    def handle(self, *args, **options):\n        user_id = options[\"userid\"]\n        user_response = self.client.get(f\"/Users/{user_id}\")\n        if options[\"output\"]:\n            output_path = settings.BASE_DIR / options[\"output\"]\n            output_path.parent.mkdir(parents=True, exist_ok=True)\n            with open(output_path, \"w\") as output_file:\n                json.dump(user_response, output_file, indent=4)\n        else:\n            json.dump(user_response, self.stdout, indent=4)\n","repo_name":"uktrade/help-desk-service","sub_path":"help_desk_api/management/commands/halo_user.py","file_name":"halo_user.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} {"seq_id":"30360485431","text":"import argparse\nimport subprocess\nimport time\nfrom datetime import datetime, timedelta\nimport os\n\ndef schedule_daily_task(time_str, file_path, log_file, custom_dir):\n    try:\n        # Parse the input time\n        scheduled_time = datetime.strptime(time_str, '%H:%M:%S')\n\n        while True:\n            # Calculate the delay until the scheduled time\n            now = datetime.now()\n            scheduled_datetime = datetime(now.year, now.month, now.day, scheduled_time.hour, scheduled_time.minute, scheduled_time.second)\n            delay = (scheduled_datetime - now).total_seconds()\n\n            if delay > 0:\n                print(f\"Task scheduled at {scheduled_datetime}. Waiting for {delay} seconds...\")\n                time.sleep(delay)\n\n                # Execute the command and redirect output to the log file\n                with open(log_file, 'a') as log:\n                    command = ['python', custom_dir, file_path]\n                    subprocess.Popen(command, stdout=log, stderr=log, text=True, close_fds=True)\n                    print(f\"Command executed: {' '.join(command)}\")\n\n                # Schedule for the next day\n                scheduled_datetime += timedelta(days=1)\n\n            else:\n                time.sleep(10)\n                print(\"Scheduled time has already passed for today. Rescheduling for tomorrow.\")\n                print((datetime.now() - scheduled_datetime).total_seconds())\n\n    except Exception as e:\n        print(f\"Error scheduling the task: {e}\")\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Schedule a daily task to run customdirscan.py at a specified time.\")\n    parser.add_argument(\"time\", help=\"Scheduled time in the format 'HH:MM:SS'\")\n    parser.add_argument(\"filepath\", help=\"File path for customdirscan.py to operate on\")\n    parser.add_argument(\"logfile\", help=\"Path to the log file\")\n    parser.add_argument(\"customdir\", help=\"Path to the CustomDirScan.py\")\n\n    args = parser.parse_args()\n\n    schedule_daily_task(args.time, args.filepath, args.logfile, args.customdir)\n","repo_name":"Kenny4103/V.U.L.N","sub_path":"Scanning/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"15352604210","text":"from flask import Flask, jsonify, request\nfrom flask_pymongo import PyMongo\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config[\"MONGO_URI\"] = \"mongodb://127.0.0.1:27017/bookex?compressors=disabled\"\n\nmongo = PyMongo(app)\n\n@app.route('/home')\ndef hello_world():\n    return 'Hello, World!'

    Hello, World!

\n\n@app.route('/users')\ndef get_all_users():\n    # get users' data from mongo\n    users = mongo.db.users.find()\n    return jsonify({\n        \"data\": users\n    })\n\n@app.route('/add/users', methods =['POST'])\ndef add_user():\n    u = dict(request.form)\n    # insert new user into users' collection in mongo\n    inserted = mongo.db.users.insert(u)\n    return jsonify({\n        \"message\": \"Inserted\"\n    })\n\n@app.route('/users/<username>')\ndef get_user(username):\n    # get users' data from mongo\n    user = mongo.db.users.find_one({ \"username\" : username})\n    user[\"_id\"] = str(user[\"_id\"])\n    return jsonify({\n        \"data\": user\n    })\n\n@app.route('/user/<username>/addbook', methods=['POST'])\ndef add_book(username):\n    # get the data of the new book from the user's request\n    book = dict(request.form)\n    if '_id' not in book:\n        # the book does not exist in the database, so add it\n        book_inserted = mongo.db.books.insert_one(book) \n\n    # add the book to the user's books\n    update_statement = { \"$push\": { \"books\": {\n        \"isbn\": book[\"isbn\"],\n        \"status\": \"Available\", \n        \"availableOn\": datetime.today().strftime('%Y-%m-%d')\n    }} }\n    user_books_updated = mongo.db.users.update({\"username\": username}, update_statement)\n\n    return jsonify({\n        \"message\": \"Inserted\"\n    })\n\ndef objectId_to_str(item):\n    # the items mongo returns are immutable dictionaries (they cannot be modified)\n    # so build a regular dictionary from them before changing any value\n    editable_item = dict(item)\n    editable_item[\"_id\"] = str(editable_item[\"_id\"])\n    return editable_item\n\n@app.route('/books')\ndef get_books():\n    # get books' data from mongo\n    books = list(mongo.db.books.find())\n    # for each book in the books list, convert the _id from ObjectId to str (so it is JSON serializable)\n    books = list(map(objectId_to_str, books)) ## apply objectId_to_str to every element of the books list \n    \n    return jsonify({\n        \"data\": books\n    })\n\n# decorator\n@app.route('/request/<username>', methods=['POST'])\ndef create_request(username):\n    # create the request\n    request_data = dict(request.form)\n    request_data['from'] = username\n    request_data['time'] = datetime.now()\n    request_data['status'] = 'Pending'\n    # insert the new request into the requests collection in mongo\n    inserted = mongo.db.requests.insert_one(request_data)\n    print(\"\\n\".join(dir(inserted)))\n    return jsonify({\n        \"message\": \"Inserted\"\n    })\n\n@app.route('/incoming/requests/<username>')\ndef get_incoming_requests(username):\n    requests_data = mongo.db.requests.find({ \"to\" : username})\n    requests = list(map(objectId_to_str, requests_data))\n    return jsonify({\n        \"data\" : requests \n    })\n\n\n@app.route('/outgoing/requests/<username>')\ndef get_outgoing_requests(username):\n    requests_data = mongo.db.requests.find({ \"from\" : username})\n    requests = list(map(objectId_to_str, requests_data))\n    return jsonify({\n        \"data\" : requests \n    })\n\nif __name__ == '__main__':\n    # main entry point\n    app.run(debug=True)\n","repo_name":"SanaaCHAOU/BookEx","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"22856953701","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 23 16:44:22 2020\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\nimport tensorflow.compat.v1 as tf \r\ntf.disable_v2_behavior()\r\n\r\nimport os \r\nimport shutil\r\nimport random\r\nimport math\r\nimport scipy.io 
as sio\r\nimport time\r\nfrom skimage import measure\r\n# import binvox_rw\r\nimport argparse\r\nimport trimesh\r\nfrom im2mesh.utils import libmcubes\r\nfrom im2mesh.utils.libkdtree import KDTree\r\nfrom sample_func import get_sample, init_sphere, init_smooth_grid_index, bigger\r\nimport re\r\nimport tf_util\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--train',action='store_true', default=False)\r\nparser.add_argument('--data_dir', type=str, required=True)\r\nparser.add_argument('--out_dir', type=str, required=True)\r\nparser.add_argument('--class_idx', type=str, default=\"026911156\")\r\nparser.add_argument('--save_idx', type=int, default=-1)\r\nparser.add_argument('--CUDA', type=int, default=0)\r\nparser.add_argument('--index', type=int, default=0)\r\nparser.add_argument('--dataset', type=str, default=\"shapenet\")\r\nparser.add_argument('--scale', type=float, default=1.0)\r\nparser.add_argument('--lr', type=float, default=0.7)\r\nparser.add_argument('--base_lr', type=float, default=0.1)\r\nparser.add_argument('--vox_loss_weight', type=float, default=10)\r\nparser.add_argument('--grad_loss_weight', type=float, default=0.1)\r\nparser.add_argument('--sdf_loss_weight', type=float, default=1.0)\r\nparser.add_argument('--sphere_radius', type=float, default=0.5)\r\nparser.add_argument('--level', type=int, default=15)\r\nparser.add_argument('--obj_ind', type=int, default=0)\r\na = parser.parse_args()\r\n\r\ncuda_idx = str(a.CUDA)\r\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]= cuda_idx\r\n\r\nBS = 1\r\nPOINT_NUM = 50000\r\nPOINT_NUM_GT = 50000\r\nINPUT_DIR = a.data_dir\r\nINDEX = a.index\r\nVOX_SIZE =128\r\nSCALE = a.scale\r\nLR = a.lr\r\nBASE_LR = a.base_lr\r\nVOX_LOSS_WEIGHT = a.vox_loss_weight\r\nGRAD_LOSS_WEIGHT = a.grad_loss_weight\r\nSDF_LOSS_WEIGHT = a.sdf_loss_weight\r\nSAMPLE_TYPE= 10 # a.sample_type\r\nTEST_VOX_SIZE = 256\r\nOUTPUT_DIR = a.out_dir\r\n\r\nobj_name = os.path.join(INPUT_DIR, 'demo.ply')\r\nif obj_name[-4:] == '.xyz':\r\n gttxt = np.loadtxt(obj_name)\r\n gttxt = gttxt[:, :3]\r\nelif 'txt' in obj_name:\r\n gttxt = np.loadtxt(obj_name)\r\nelif 'ply' in obj_name:\r\n gttxt = trimesh.load(obj_name)\r\n gttxt = gttxt.vertices\r\nelif 'npy' in obj_name:\r\n gttxt = np.load(obj_name)\r\n gttxt = np.float64(gttxt)\r\norigin_gt = gttxt\r\nminn = np.min(gttxt)\r\nmaxn = np.max(gttxt)\r\ngttxt = (gttxt - minn) / (maxn - minn)\r\ngttxt -= 0.5\r\ngttxt_bigger = bigger(gttxt, VOX_SIZE)\r\n\r\nsphere_init_sdf = init_sphere(radius=a.sphere_radius, size=VOX_SIZE) # np.load('vox_np_256.npy')\r\nuseful_index, useful_index_test, useful_index_weight = init_smooth_grid_index(pc=gttxt_bigger, size=VOX_SIZE, level=a.level) # np.loadtxt('usefull_index_gt_armadillo.xyz')\r\n\r\nkdtree = KDTree(gttxt)\r\n\r\nif(a.dataset==\"shapenet\" or a.dataset=='other'):\r\n GT_DIR = './origin_data/' + a.class_idx + '/'\r\nif(a.dataset==\"famous\"):\r\n GT_DIR = './data/famous_noisefree/03_meshes/'\r\nif(a.dataset==\"ABC\"):\r\n GT_DIR = './data/abc_noisefree/03_meshes/'\r\n\r\nGT_DIR = os.path.join(INPUT_DIR, 'famous_dense', '03_meshes/')\r\n\r\nTRAIN = a.train\r\nbd = 1.0 \r\ntest_bd = 0.55\r\n\r\nif(TRAIN):\r\n if os.path.exists(OUTPUT_DIR):\r\n shutil.rmtree(OUTPUT_DIR)\r\n print ('test_res_dir: deleted and then created!')\r\n os.makedirs(OUTPUT_DIR)\r\nelse:\r\n POINT_NUM =TEST_VOX_SIZE * TEST_VOX_SIZE\r\n\r\nnp.savetxt(os.path.join(OUTPUT_DIR, 'gt.txt'), origin_gt)\r\n\r\n\r\ndef distance_p2p(points_src, normals_src, points_tgt, 
normals_tgt):\r\n    ''' Computes minimal distances of each point in points_src to points_tgt.\r\n\r\n    Args:\r\n        points_src (numpy array): source points\r\n        normals_src (numpy array): source normals\r\n        points_tgt (numpy array): target points\r\n        normals_tgt (numpy array): target normals\r\n    '''\r\n    kdtree = KDTree(points_tgt)\r\n    dist, idx = kdtree.query(points_src)\r\n\r\n    if normals_src is not None and normals_tgt is not None:\r\n        normals_src = \\\r\n            normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True)\r\n        normals_tgt = \\\r\n            normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True)\r\n\r\n#    normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1)\r\n#    # Handle normals that point into wrong direction gracefully\r\n#    # (mostly due to method not caring about this in generation)\r\n#    normals_dot_product = np.abs(normals_dot_product)\r\n\r\n        normals_dot_product = np.abs(normals_tgt[idx] * normals_src)\r\n        normals_dot_product = normals_dot_product.sum(axis=-1)\r\n    else:\r\n        normals_dot_product = np.array(\r\n            [np.nan] * points_src.shape[0], dtype=np.float32)\r\n    return dist, normals_dot_product\r\n\r\ndef eval_pointcloud(pointcloud, pointcloud_tgt,\r\n                    normals=None, normals_tgt=None):\r\n    ''' Evaluates a point cloud.\r\n\r\n    Args:\r\n        pointcloud (numpy array): predicted point cloud\r\n        pointcloud_tgt (numpy array): target point cloud\r\n        normals (numpy array): predicted normals\r\n        normals_tgt (numpy array): target normals\r\n    '''\r\n    # Return maximum losses if pointcloud is empty\r\n\r\n\r\n    pointcloud = np.asarray(pointcloud)\r\n    pointcloud_tgt = np.asarray(pointcloud_tgt)\r\n\r\n    # Completeness: how far are the points of the target point cloud\r\n    # from the predicted point cloud\r\n    completeness, completeness_normals = distance_p2p(\r\n        pointcloud_tgt, normals_tgt, pointcloud, normals\r\n    )\r\n    completeness2 = completeness**2\r\n\r\n    completeness = completeness.mean()\r\n    completeness2 = completeness2.mean()\r\n    completeness_normals = completeness_normals.mean()\r\n\r\n    # Accuracy: how far are the points of the predicted pointcloud\r\n    # from the target pointcloud\r\n    accuracy, accuracy_normals = distance_p2p(\r\n        pointcloud, normals, pointcloud_tgt, normals_tgt\r\n    )\r\n    accuracy2 = accuracy**2\r\n\r\n    accuracy = accuracy.mean()\r\n    accuracy2 = accuracy2.mean()\r\n    accuracy_normals = accuracy_normals.mean()\r\n    #print(completeness,accuracy,completeness2,accuracy2)\r\n    # Chamfer distance\r\n    chamferL2 = 0.5 * (completeness2 + accuracy2)\r\n    print('chamferL2:',chamferL2)\r\n    normals_correctness = (\r\n        0.5 * completeness_normals + 0.5 * accuracy_normals\r\n    )\r\n    chamferL1 = 0.5 * (completeness + accuracy)\r\n    print('normals_correctness:',normals_correctness,'chamferL1:',chamferL1)\r\n    return normals_correctness, chamferL1, chamferL2\r\n\r\ndef safe_norm_np(x, epsilon=1e-12, axis=1):\r\n    return np.sqrt(np.sum(x*x, axis=axis) + epsilon)\r\n\r\ndef safe_norm(x, epsilon=1e-12, axis=None):\r\n    return tf.sqrt(tf.reduce_sum(x ** 2, axis=axis) + epsilon)\r\n\r\ndef boundingbox(x,y,z):\r\n    return min(x),max(x),min(y),max(y),min(z),max(z)\r\n\r\n\r\n\r\ndef chamfer_distance_tf_None(array1, array2):\r\n    array1 = tf.reshape(array1,[-1,3])\r\n    array2 = tf.reshape(array2,[-1,3])\r\n    av_dist1 = av_dist_None(array1, array2)\r\n    av_dist2 = av_dist_None(array2, array1)\r\n    return av_dist1+av_dist2\r\n\r\n\r\ndef distance_matrix_None(array1, array2, num_point, num_features = 3):\r\n    \"\"\"\r\n    arguments: \r\n        array1: the array, size: (num_point, num_feature)\r\n        array2: the 
samples, size: (num_point, num_feature)\r\n returns:\r\n distances: each entry is the distance from a sample to array1\r\n , it's size: (num_point, num_point)\r\n \"\"\"\r\n expanded_array1 = tf.tile(array1, (num_point, 1))\r\n expanded_array2 = tf.reshape(\r\n tf.tile(tf.expand_dims(array2, 1), \r\n (1, num_point, 1)),\r\n (-1, num_features))\r\n distances = tf.norm(expanded_array1-expanded_array2, axis=1)\r\n distances = tf.reshape(distances, (num_point, num_point))\r\n return distances\r\n\r\ndef av_dist_None(array1, array2):\r\n \"\"\"\r\n arguments:\r\n array1, array2: both size: (num_points, num_feature)\r\n returns:\r\n distances: size: (1,)\r\n \"\"\"\r\n distances = distance_matrix_None(array1, array2,points_input_num[0,0])\r\n distances = tf.reduce_min(distances, axis=1)\r\n distances = tf.reduce_mean(distances)\r\n return distances\r\n\r\n\r\ndef get_reg_loss():\r\n res = tf.zeros([])\r\n vars = tf.trainable_variables()\r\n for v in vars:\r\n res += tf.nn.l2_loss(v)\r\n return res\r\n\r\n\r\ndef chamfer_distance_tf_None(p, q):\r\n from nn_distance import tf_nndistance\r\n a,b,c,d = tf_nndistance.nn_distance(p,q)\r\n cd1 = tf.reduce_mean(a)\r\n cd2 = tf.reduce_mean(c)\r\n return cd1+cd2\r\n\r\n\r\nfeature = tf.placeholder(tf.float64, shape=[BS,None,SHAPE_NUM])\r\npoints_target = tf.placeholder(tf.float64, shape=[BS,POINT_NUM,3])\r\ninput_points_3d = tf.placeholder(tf.float64, shape=[BS, POINT_NUM,3])\r\npoints_target_num = tf.placeholder(tf.int64, shape=[1,1])\r\npoints_input_num = tf.placeholder(tf.int64, shape=[1,1])\r\nglobal_step = tf.placeholder(tf.float64, shape=[])\r\n\r\n\r\ndef smaller(p):\r\n p = (p - (VOX_SIZE - 1)/2.0) / ((VOX_SIZE-1)/ 2.0/bd)\r\n return p\r\n \r\ndef trilinear_interpolation_3d(data, warp):\r\n \"\"\"\r\n Interpolate a 3D array (monochannel).\r\n :param data: 3D tensor.\r\n :param warp: a list of 3D coordinates to interpolate. 
2D tensor with shape (n_points, 3).\r\n \"\"\"\r\n # warp = (warp + 1) * (VOX_SIZE / 2)\r\n n_pts = warp.shape[0]\r\n # Pad data around to avoid indexing overflow\r\n data = tf.pad(data, [[1, 1], [1, 1], [1, 1]], mode='SYMMETRIC')\r\n warp = warp + tf.constant([1, 1, 1], dtype='float64')\r\n i000 = tf.cast(tf.floor(warp), dtype=tf.int64)\r\n i100 = i000 + tf.constant([1, 0, 0], dtype=tf.int64)\r\n i010 = i000 + tf.constant([0, 1, 0], dtype=tf.int64)\r\n i001 = i000 + tf.constant([0, 0, 1], dtype=tf.int64)\r\n i110 = i000 + tf.constant([1, 1, 0], dtype=tf.int64)\r\n i101 = i000 + tf.constant([1, 0, 1], dtype=tf.int64)\r\n i011 = i000 + tf.constant([0, 1, 1], dtype=tf.int64)\r\n i111 = i000 + tf.constant([1, 1, 1], dtype=tf.int64)\r\n c000 = tf.gather_nd(data, i000)\r\n c100 = tf.gather_nd(data, i100)\r\n c010 = tf.gather_nd(data, i010)\r\n c001 = tf.gather_nd(data, i001)\r\n c110 = tf.gather_nd(data, i110)\r\n c101 = tf.gather_nd(data, i101)\r\n c011 = tf.gather_nd(data, i011)\r\n c111 = tf.gather_nd(data, i111)\r\n # build matrix\r\n h00 = tf.ones(n_pts, dtype=tf.float64)\r\n x0 = tf.cast(i000[:, 0], dtype=tf.float64)\r\n y0 = tf.cast(i000[:, 1], dtype=tf.float64)\r\n z0 = tf.cast(i000[:, 2], dtype=tf.float64)\r\n x1 = tf.cast(i111[:, 0], dtype=tf.float64)\r\n y1 = tf.cast(i111[:, 1], dtype=tf.float64)\r\n z1 = tf.cast(i111[:, 2], dtype=tf.float64)\r\n h1 = tf.stack([h00, x0, y0, z0, x0 * y0, x0 * z0, y0 * z0, x0 * y0 * z0])\r\n h2 = tf.stack([h00, x1, y0, z0, x1 * y0, x1 * z0, y0 * z0, x1 * y0 * z0])\r\n h3 = tf.stack([h00, x0, y1, z0, x0 * y1, x0 * z0, y1 * z0, x0 * y1 * z0])\r\n h4 = tf.stack([h00, x1, y1, z0, x1 * y1, x1 * z0, y1 * z0, x1 * y1 * z0])\r\n h5 = tf.stack([h00, x0, y0, z1, x0 * y0, x0 * z1, y0 * z1, x0 * y0 * z1])\r\n h6 = tf.stack([h00, x1, y0, z1, x1 * y0, x1 * z1, y0 * z1, x1 * y0 * z1])\r\n h7 = tf.stack([h00, x0, y1, z1, x0 * y1, x0 * z1, y1 * z1, x0 * y1 * z1])\r\n h8 = tf.stack([h00, x1, y1, z1, x1 * y1, x1 * z1, y1 * z1, x1 * y1 * z1])\r\n h = tf.stack([h1, h2, h3, h4, h5, h6, h7, h8])\r\n h = tf.transpose(h, perm=[2, 0, 1])\r\n c = tf.transpose(tf.stack([c000, c100, c010, c110, c001, c101, c011, c111]))\r\n c = tf.expand_dims(c, -1)\r\n a = tf.matmul(tf.matrix_inverse(h), c)[:, :, 0]\r\n x = warp[:, 0]\r\n y = warp[:, 1]\r\n z = warp[:, 2]\r\n\r\n f = a[:, 0] + a[:, 1] * x + a[:, 2] * y + a[:, 3] * z + \\\r\n a[:, 4] * x * y + a[:, 5] * x * z + a[:, 6] * y * z + a[:, 7] * x * y * z\r\n \r\n gradx = a[:, 1] + a[:, 4] * y + a[:, 5] * z + a[:, 7] * y * z\r\n grady = a[:, 2] + a[:, 4] * x + a[:, 6] * z + a[:, 7] * x * z\r\n gradz = a[:, 3] + a[:, 5] * x + a[:, 6] * y + a[:, 7] * x * y\r\n gradx = gradx[:, None]\r\n grady = grady[:, None]\r\n gradz = gradz[:, None]\r\n grad = tf.concat([gradx, grady, gradz], 1)\r\n\r\n return f[:, None], grad\r\n \r\n\r\ndef get_vox(dim):\r\n a = sphere_init_sdf\r\n a = np.float64(a)\r\n a = tf.convert_to_tensor(a)\r\n vox_tensor = tf.Variable(a)\r\n return vox_tensor\r\n \r\n \r\ndef gridpull(input_points_3d, points_target):\r\n vox = get_vox(VOX_SIZE)\r\n points = bigger(input_points_3d, VOX_SIZE) # (input_points_3d + 1) * (VOX_SIZE / 2)\r\n gtpoints = bigger(points_target, VOX_SIZE)\r\n sdf, grad = trilinear_interpolation_3d(vox, points[0])\r\n gtsdf, gtgrad = trilinear_interpolation_3d(vox, gtpoints[0])\r\n gtsdf = gtsdf[None]\r\n sdf = sdf[None]\r\n dis = tf.exp(tf.sqrt(tf.reduce_sum((input_points_3d - points_target)**2, 2)))\r\n grad = grad[None]\r\n gtgrad = gtgrad[None]\r\n normal_p_lenght = tf.expand_dims(safe_norm(grad, axis 
= -1), -1)\r\n gt_normal_p_length = tf.expand_dims(safe_norm(gtgrad, axis=-1), -1)\r\n grad_norm = grad /normal_p_lenght\r\n gtgrad_norm = gtgrad/gt_normal_p_length\r\n g_points = input_points_3d - sdf * grad_norm\r\n gtgrad2 = gtgrad \r\n gtgrad2 = gtgrad2[None]\r\n gt_normal_p_length2 = tf.expand_dims(safe_norm(gtgrad2, axis=-1), -1)\r\n gtgrad_norm2 = gtgrad2/gt_normal_p_length2\r\n return g_points, sdf, vox, grad_norm, dis, gtsdf, gtgrad_norm, gtgrad_norm2, normal_p_lenght\r\n\r\n\r\ndef get_lr(step):\r\n step = tf.cast(step, tf.float32)\r\n n = tf.floor(tf.divide(step, 400))\r\n bilv = tf.pow(LR, n)\r\n lr = BASE_LR * bilv\r\n return lr\r\n\r\n \r\ndef get_sample_gt_pair(gts, kdtree, batch_size, pn, step, sample_weight):\r\n noise = get_sample(gts, POINT_NUM_GT, SAMPLE_TYPE, step, sample_weight)\r\n dist, idx = kdtree.query(noise)\r\n select_gts = gts[idx]\r\n noise = noise[None]\r\n select_gts = select_gts[None]\r\n return noise, select_gts\r\n\r\n\r\ng_points, sdf, voxs, grad, dis, gtsdf, gtgrad, gtgrad2, grad_len = gridpull(input_points_3d, points_target)\r\n\r\nuseful_index_tf = tf.cast(tf.convert_to_tensor(useful_index), tf.int32)\r\nuseful_index_tf_weight = tf.cast(tf.convert_to_tensor(useful_index_weight), tf.float64)\r\nuseful_middle_grid_sdf = tf.gather_nd(voxs, useful_index_tf)\r\nvox_loss = tf.zeros(useful_middle_grid_sdf.shape, tf.float64)\r\n\r\nfor i in range(6):\r\n index = useful_index.copy()\r\n if i < 3:\r\n index[:, i] += 1\r\n else:\r\n index[:, i - 3] -= 1\r\n index = tf.cast(tf.convert_to_tensor(index), tf.int32)\r\n useful_grid_sdf = tf.gather_nd(voxs, index)\r\n vox_loss += (useful_grid_sdf - useful_middle_grid_sdf)**2\r\n\r\nvox_loss = tf.reduce_mean(useful_index_tf_weight * vox_loss)\r\n\r\nl2_loss = tf.reduce_mean(dis * tf.norm((points_target-g_points), axis=-1))\r\nsdf_loss = tf.reduce_mean((gtsdf)**2)\r\nif TRAIN:\r\n sdf_mins_loss = tf.reduce_mean((grad[:, 3 * POINT_NUM_GT // 4 : 7 * POINT_NUM_GT // 8] - grad[:, 7 * POINT_NUM_GT // 8:])**2)\r\nelse:\r\n sdf_mins_loss = tf.zeros([], 'float64')\r\n\r\n\r\ngrad_len_loss = tf.reduce_mean((grad_len - 1)**2)\r\ngrad_loss = 1 - tf.reduce_mean(tf.reduce_sum(grad * gtgrad, 2))\r\nprint('l2_loss:',l2_loss)\r\n\r\nvox_loss = VOX_LOSS_WEIGHT * vox_loss\r\ngrad_loss = GRAD_LOSS_WEIGHT * grad_loss\r\nsdf_loss = SDF_LOSS_WEIGHT * sdf_loss\r\n\r\nloss = l2_loss + vox_loss + sdf_loss + grad_loss #+ grad_len_loss # + grad_loss # + sdf_mins_loss # + 1e-3 * ll\r\n\r\n\r\nt_vars = tf.trainable_variables()\r\noptim = tf.train.AdamOptimizer(learning_rate=get_lr(global_step), beta1=0.9)\r\nloss_grads_and_vars = optim.compute_gradients(loss, var_list=t_vars)\r\nloss_optim = optim.apply_gradients(loss_grads_and_vars)\r\n\r\nconfig = tf.ConfigProto(allow_soft_placement=False) \r\n\r\nsaver_restore = tf.train.Saver(var_list=t_vars)\r\nsaver = tf.train.Saver(max_to_keep=2000000)\r\n\r\n\r\nwith tf.Session(config=config) as sess:\r\n feature_bs = []\r\n for i in range(SHAPE_NUM):\r\n tt = []\r\n for j in range(int(POINT_NUM)):\r\n t = np.zeros(SHAPE_NUM)\r\n t[i] = 1\r\n tt.append(t)\r\n feature_bs.append(tt)\r\n feature_bs = np.asarray(feature_bs)\r\n\r\n grid_points = []\r\n for i in range(VOX_SIZE):\r\n for j in range(VOX_SIZE):\r\n for k in range(VOX_SIZE):\r\n grid_points.append(np.array([i,j,k]))\r\n grid_points = np.asarray(grid_points)\r\n grid_points = smaller(grid_points)\r\n \r\n total_step = 4800\r\n if(TRAIN):\r\n print('train start')\r\n sess.run(tf.global_variables_initializer())\r\n start_time = time.time()\r\n \r\n 
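# These (1,1) arrays feed the int64 placeholders points_target_num and points_input_num declared above.\r\n        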
POINT_NUM_GT_bs = np.array(POINT_NUM_GT).reshape(1,1)\r\n points_input_num_bs = np.array(POINT_NUM).reshape(1,1)\r\n \r\n all_loss = []\r\n sample_weight = None\r\n sample_time = 0\r\n train_time = 0\r\n for i in range(total_step + 10):\r\n epoch_index = np.random.choice(SHAPE_NUM, SHAPE_NUM, replace = False)\r\n loss_i = 0\r\n at = time.time()\r\n noises, selectgts = get_sample_gt_pair(gttxt, kdtree, BS, POINT_NUM, i, sample_weight)\r\n sample_time += time.time() - at\r\n for epoch in range(1):\r\n input_points_2d_bs = noises[:, epoch * POINT_NUM : (epoch + 1) * POINT_NUM]\r\n point_gt = selectgts[:, epoch * POINT_NUM : (epoch + 1) * POINT_NUM]\r\n feature_bs_t = feature_bs[0,:,:].reshape(1,-1,SHAPE_NUM)\r\n at = time.time()\r\n _,loss_c, l2loss, voxloss, sdfloss, gradloss = sess.run([loss_optim,loss, l2_loss, vox_loss, sdf_loss, grad_loss],feed_dict={global_step: i, input_points_3d:input_points_2d_bs,points_target:point_gt,feature:feature_bs_t,points_target_num:POINT_NUM_GT_bs,points_input_num:points_input_num_bs})\r\n train_time += time.time() - at\r\n loss_i = loss_i + loss_c\r\n # print(sdfs)\r\n loss_i = loss_i / SHAPE_NUM\r\n if(i%10 == 0):\r\n print('epoch:', i, 'epoch loss:', loss_i, 'l2 loss: ', l2loss, 'vox loss: ', voxloss, 'sdf loss: ', sdfloss, 'grad loss: ', gradloss)\r\n if(i%total_step == 0 and i > 10):\r\n print('save model')\r\n saver.save(sess, os.path.join(OUTPUT_DIR, \"model\"), global_step=i+1)\r\n end_time = time.time()\r\n print('time: ', train_time)\r\n else:\r\n print('test start')\r\n checkpoint = tf.train.get_checkpoint_state(OUTPUT_DIR).all_model_checkpoint_paths\r\n path = OUTPUT_DIR + 'model-' + str(INDEX * total_step + 1)\r\n print(path)\r\n saver.restore(sess, path)\r\n \r\n s = np.arange(-test_bd,test_bd, (2*test_bd)/TEST_VOX_SIZE)\r\n \r\n print(s.shape[0])\r\n vox_size = s.shape[0]\r\n POINT_NUM_GT_bs = np.array(vox_size).reshape(1,1)\r\n points_input_num_bs = np.array(POINT_NUM).reshape(1,1)\r\n input_points_2d_bs = []\r\n for i in s:\r\n for j in s:\r\n for k in s:\r\n input_points_2d_bs.append(np.asarray([i,j,k]))\r\n input_points_2d_bs = np.asarray(input_points_2d_bs)\r\n print('input_points_2d_bs',input_points_2d_bs.shape)\r\n input_points_2d_bs = input_points_2d_bs.reshape((vox_size,vox_size,vox_size,3))\r\n POINT_NUM_GT_bs = np.array(vox_size*vox_size).reshape(1,1)\r\n\r\n test_num = SHAPE_NUM\r\n print('test_num:',test_num)\r\n cd = 0\r\n nc = 0\r\n cd2 = 0\r\n for epoch in range(test_num):\r\n print('test:',epoch)\r\n vox = []\r\n voxgrad = []\r\n feature_bs = []\r\n for j in range(vox_size*vox_size):\r\n t = np.zeros(SHAPE_NUM)\r\n t[epoch] = 1\r\n feature_bs.append(t)\r\n feature_bs = np.asarray(feature_bs)\r\n for i in range(vox_size):\r\n input_points_2d_bs_t = input_points_2d_bs[i,:,:,:] \r\n input_points_2d_bs_t = input_points_2d_bs_t.reshape(BS, vox_size*vox_size, 3)\r\n feature_bs_t = feature_bs.reshape(BS,vox_size*vox_size,SHAPE_NUM)\r\n size_input_ss = np.random.rand() * 4 + 1\r\n sdf_c = sess.run([sdf],feed_dict={size_input: size_input_ss, size_input3: size_input_ss, size_input2: size_input_ss, input_points_3d:input_points_2d_bs_t,feature:feature_bs_t,points_target_num:POINT_NUM_GT_bs,points_input_num:points_input_num_bs})\r\n \r\n vox.append(sdf_c)\r\n vox = np.asarray(vox)\r\n vox = vox.reshape((vox_size,vox_size,vox_size))\r\n vox_max = np.max(vox.reshape((-1)))\r\n vox_min = np.min(vox.reshape((-1)))\r\n print('max_min:',vox_max,vox_min)\r\n \r\n threshs = [0, 0.001, 0.005] # , -0.001, -0.003, -0.005, -0.01, -0.02]\r\n for thresh 
in threshs:\r\n print(np.sum(vox>thresh),np.sum(vox0.0)0.0)>np.sum(vox<0.0)):\r\n triangles_t = []\r\n for it in range(triangles.shape[0]):\r\n tt = np.array([triangles[it,2],triangles[it,1],triangles[it,0]])\r\n triangles_t.append(tt)\r\n triangles_t = np.asarray(triangles_t)\r\n else:\r\n triangles_t = triangles\r\n triangles_t = np.asarray(triangles_t)\r\n\r\n # vertices -= 0.5\r\n # Undo padding\r\n vertices -= 1\r\n # Normalize to bounding box\r\n vertices /= np.array([vox_size-1, vox_size-1, vox_size-1])\r\n vertices = 1.1 * (vertices - 0.5)\r\n\r\n vertices += 0.5\r\n vertices = vertices * (maxn - minn) + minn\r\n\r\n mesh = trimesh.Trimesh(vertices, triangles_t,\r\n vertex_normals=None,\r\n process=False)\r\n\r\n name = objn\r\n mesh.export(OUTPUT_DIR + '/occn_' + name + '_'+ str(INDEX*100 + 1) + '_' + str(thresh) + '.off')\r\n \r\n mesh = trimesh.Trimesh(vertices, triangles,\r\n vertex_normals=None,\r\n process=False)\r\n # if(a.dataset == 'other'):\r\n # continue\r\n if(a.dataset==\"shapenet\" or a.dataset=='other'):\r\n ps, idx = mesh.sample(1000000, return_index=True)\r\n else:\r\n ps, idx = mesh.sample(10000, return_index=True)\r\n ps = ps.astype(np.float32)\r\n normals_pred = mesh.face_normals[idx]\r\n \r\n if False: # (a.dataset==\"shapenet\" or a.dataset == 'other'):\r\n data = np.load(GT_DIR + name + '/pointcloud.npz')\r\n pointcloud = data['points']\r\n normal = data['normals']\r\n else:\r\n mesh_gt = trimesh.load(GT_DIR + name + '.ply')\r\n pointcloud, idx_gt = mesh_gt.sample(10000, return_index=True)\r\n pointcloud = pointcloud.astype(np.float32)\r\n normal = mesh_gt.face_normals[idx_gt]\r\n \r\n nc_t,cd_t,cd2_t = eval_pointcloud(ps,pointcloud.astype(np.float32),normals_pred.astype(np.float32),normal.astype(np.float32))\r\n np.savez(OUTPUT_DIR + name + '_'+ str(thresh),pp = ps, np = normals_pred, p = pointcloud, n = normal, nc = nc_t, cd = cd_t, cd2 = cd2_t)\r\n nc = nc + nc_t\r\n cd = cd + cd_t\r\n cd2 = cd2 + cd2_t\r\n print('mean_nc:',nc/test_num,'mean_cd:',cd/test_num,'cd2:',cd2/test_num)\r\n \r\n \r\n \r\n","repo_name":"chenchao15/GridPull","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24235,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"4929195756","text":"import random\r\nimport time\r\nimport requests\r\nfrom lxml import etree\r\nheaders = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'\r\n}\r\nurl = 'https://movie.douban.com/review/best/'\r\nres = requests.get(url, headers=headers)\r\ntext = res.content.decode()\r\ntree = etree.HTML(text)\r\n# div = tree.xpath('//div[@class=\"review-list chart \"]/div/div')\r\ndiv = tree.xpath('//div[@class=\"main review-item\"]')\r\n# print(div)\r\nfor d in div:\r\n # 获取详情超链接\r\n href = d.xpath('./a/@href')\r\n # short_content = d.xpath('./div/div[1]/div/text()')\r\n # 简短简介\r\n short_content = d.xpath('./div//div[@class=\"short-content\"]/text()')\r\n # 获取完整简介的idz值 用于拼接完整的简介的url\r\n r_id = d.xpath('./@id')[0]\r\n # 请求完整简介的url\r\n r_url = f'https://movie.douban.com/j/review/{r_id}/full'\r\n r_res = requests.get(r_url, headers=headers)\r\n print(r_res.json()['html'])\r\n time.sleep(random.randint(1, 3))\r\n\r\n'''\r\n# 
Analyze the full-review URLs\r\nhttps://movie.douban.com/j/review/14584121/full\r\nhttps://movie.douban.com/j/review/14576114/full\r\n'''","repo_name":"IridescentLee/PythonTest","sub_path":"day10-08-17-requests实战案例/4抓取影评标题完整简介.py","file_name":"4抓取影评标题完整简介.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"34841847806","text":"import minerl\n\ndata = minerl.data.make('MineRLNavigateExtremeDense-v0')\n\npov_shape = (64,64,3)\n# Iterate through a single epoch gathering sequences of at most 32 steps\nfor obs, rew, done, act in data.seq_iter(num_epochs=1, max_sequence_len=32):\n    for x in obs['pov']:\n        if x.shape != pov_shape:\n            print(f'mineRL bug, pov shape: {x.shape}')\n","repo_name":"albertwujj/minerl","sub_path":"tutorial/data_sample.py","file_name":"data_sample.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"5200062618","text":"import os\n\nfrom torch.utils.data import DataLoader, Dataset\nimport torchvision.transforms as transforms\nfrom tools import captcha_info\nfrom PIL import Image\nfrom tools import trans\n\n# Image preprocessing\ntransform = transforms.Compose([\n    transforms.Resize((80, 200)),\n    transforms.Grayscale(),  # grayscale, reduces the impact of color information on model training\n    transforms.ToTensor()  # convert to tensor data\n])\n\n\nclass mydataset(Dataset):\n    \"\"\"\n    mydataset inherits from Dataset\n    \"\"\"\n\n    def __init__(self, folder, transform=None):\n        self.train_image_file_paths = [os.path.join(folder, image_file) for image_file in os.listdir(folder)]\n        self.transform = transform\n\n    def __len__(self):\n        return len(self.train_image_file_paths)\n\n    def __getitem__(self, idx):\n        image_root = self.train_image_file_paths[idx]\n        image_name = image_root.split(os.path.sep)[-1]  # image_name stores the file name\n        # print(image_name)\n        image = Image.open(image_root)\n        if self.transform is not None:\n            image = self.transform(image)  # apply the transform\n        label = trans.encode(image_name.split('_')[0])  # the label comes from the file name, i.e. the ground truth\n        # print(type(label))\n        return image, label\n\n\ndef Get_train_Dataloader():\n    dataset = mydataset(captcha_info.Train_Dataset, transform=transform)\n    return DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4, pin_memory=True)\n\n\ndef Get_test_Dataloader():\n    dataset = mydataset(captcha_info.Test_Dataset, transform=transform)\n    return DataLoader(dataset, batch_size=1, shuffle=True)\n\n\nif __name__ == '__main__':\n    '''\n    test\n    '''\n","repo_name":"gitover22/captcha_recognition","sub_path":"tools/my_dataset.py","file_name":"my_dataset.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} {"seq_id":"17763481653","text":"import os\nfrom ConfigParser import NoSectionError\nfrom vsc.utils.generaloption import GeneralOption\n\n\n## Configfiles\n# this one is shipped with the rpm\nDEFAULT_CONFIGFILE = '/etc/manage_defaults.cfg'\n# this one is if you want to overwrite some of the defaults with your own, without having to edit the original\nCONFIGFILES = [\"/etc/manage.cfg\", os.path.expanduser(\"~/.local/manage.cfg\")]\n\nCONFIG = {}\nOPTIONS = None\nLOGGER = None\n\n\ndef parseoptions():\n    \"\"\"\n    Parses the options\n    \"\"\"\n    #be as restrictive as possible\n    #TODO: add softreboot, watch out, dracsoftreboot is actually still pretty hard!\n    #so implement properly\n    # {longopt:(help_description,type,action,default_value,shortopt),}\n    parser = 
ManageOptionParser(go_configfiles=[DEFAULT_CONFIGFILE] + CONFIGFILES,\n go_mainbeforedefault=True,\n go_prefixloggername=True)\n options = parser.options\n global LOGGER\n LOGGER = parser.log\n\n if options.poweroff and (options.poweron or options.hardreboot) or (options.poweron and options.hardreboot):\n parser.log.error(\"--hardreboot, --poweron and --poweroff are mutually exclusive\")\n if options.restart and not options.forced:\n parser.log.error(\"You trying to restart the scheduler\"\n \"without the --forced option, do you know what you are doing?\")\n\n #TODO: (low) more options checking? - running without option doesn't show a usage flag?\n\n #args should be empty, since everything is optional\n if len(parser.args) > 1:\n parser.log.error(\"Invalid arguments\")\n\n global CONFIG\n # parse constants\n try:\n for opt, val in parser.configfile_parser.items('raw_configs', raw=True):\n parser.log.debug(\"adding %s to config (from configfile) \", opt)\n CONFIG[opt] = val\n except NoSectionError:\n parser.log.error(\"Could not find the [raw_configs] section in the configfile, make sure at least %s is present\",\n DEFAULT_CONFIGFILE)\n\n global OPTIONS\n OPTIONS = parser.options\n\n\ndef get_config(name=None):\n \"\"\"Returns the global config dict\"\"\"\n global CONFIG\n if name is not None:\n try:\n return CONFIG[name.lower()]\n except KeyError:\n LOGGER.raiseException(\"Error: Could not find configuration for '%s', make sure it is in %s or is properly \"\n \"added in %s, or alternatively change the location of these config files in %s\" %\n (name, DEFAULT_CONFIGFILE, CONFIGFILES, __file__))\n return CONFIG\n\n\ndef get_options():\n \"\"\"Return the global options object\"\"\"\n return OPTIONS\n\n\nclass Options(object):\n \"\"\"\n dummy class\n you can set attributes here\n so you can create objects like the options object returned by optparse\n \"\"\"\n def __init__(self):\n \"\"\"\n provide some defaults\n \"\"\"\n self.verbose = 2\n self.non_threaded = False\n self.test_run = False\n self.forced = False\n self.ack = None\n self.ack_service = None\n self.downtime = None\n self.comment = None\n #actions\n self.state = False\n self.poweron = False\n self.setonline = False\n self.setoffline = False\n self.hardreboot = False\n self.reboot = False\n self.pbsmomstatus = False\n self.pbsmomrestart = False\n self.pbsmomstop = False\n self.pbsmomcleanup = False\n self.fix_downonerror = False\n self.runcmd = None\n self.poweroff = False\n self.powercut = False\n self.ledon = False\n self.ledoff = False\n self.co = None\n #node selection\n self.storage = False\n self.cluster = None\n self.master = None\n self.chassis = None\n self.down = False\n self.worker = False\n self.quattor = True\n self.all_nodes = False\n self.imms = False\n self.idle = False\n self.offline = False\n self.node = \"\"\n #cluster actions\n self.pause = False\n self.resume = False\n self.restart = False\n\n\nclass ManageOptionParser(GeneralOption):\n \"\"\"\n Optionparser for manage\n \"\"\"\n\n def general_options(self):\n \"\"\"Set the general options\"\"\"\n # general options\n general_optiongroup = {\n \"verbose\": (\"Enable extra output, use -vv for even more output\", None, \"count\", 0, 'v'),\n \"forced\": (\"Included as a dummy protection. 
If a special node is selected, --forced is required as well\",\n None, \"store_true\", False, \"f\"),\n \"test-run\": (\"Print what would be done, without actually doing anything\", None, 'store_true', False, 't'),\n \"non-threaded\": (\"Disable threading, do commands one by one\", None, \"store_true\", False),\n \"cluster\": (\"Specify the cluster to run on, When not specified, the script will attempt to detect the current \"\n \"cluster. All operations can only affect one cluster at a time\",\n None, \"store\", None, \"C\")\n }\n self.add_group_parser(general_optiongroup, (\"General Options\", \"General options for Manage\"))\n\n def node_options(self):\n \"\"\"Set the node selection options\"\"\"\n # node selection\n descr = (\"Node selection\", \"Use these to select a node \"\n \"or a group of nodes. All selections will be added up. \"\n \"F.ex. -od will select all nodes that are either down or offline\")\n\n nodesel_group = {\n \"idle\": (\"Select all free/idle nodes\", None, \"store_true\", False, 'i'),\n \"worker\": (\"Select all worker nodes\", None, \"store_true\", False, 'a'),\n \"down\": (\"Select all down nodes\", None, \"store_true\", False),\n \"offline\": (\"Select all offline nodes\", None, \"store_true\", False, 'o'),\n \"node\": (\"Select a specific node. Multiple worker nodes can be given separated by ',', no spaces.\"\n \"This will bypass pbs server. You can also specify a range of nodes, when separated by '-'. e.g.\"\n \"'node601,605-608,203-208\", \"string\", \"store\", None, 'n'),\n \"master\": (\"Select a master node WARNING: this will bypass the PBS server.\"\n \"Multiple nodes can be given separated by ',', no spaces.\", \"string\", \"store\", None),\n \"storage\": (\"Select this master\", \"string\", \"store\", None),\n \"chassis\": (\"Select all nodes in a chassis, as it is listed in quattor, e.g. mmodule01.gengar.gent/vsc\"\n \"WARNING: this might also include masters\", \"string\", \"store\", None),\n \"quattor\": (\"Use quattor to select nodes instead of pbsnodes on the master.\"\n \" this might be faster, and also works if the master is offline.\"\n \" This option will be ignored when using the idle, down or offline node selection.\",\n None, \"store_true\", False, 'q'),\n \"all-nodes\": (\"Select all servers, WARNING: THIS WILL INCLUDE MASTERS\", None, \"store_true\", False),\n }\n self.add_group_parser(nodesel_group, descr)\n\n def action_options(self):\n \"\"\"\"Set the action options\"\"\"\n\n # action selection\n #TODO (high) don't reboot anything where the sheduling is on\n descr = (\"Node actions\", \"Use these to select the sepecific action you want to do on the selected node(s).\"\n \"You can select multiple actions. 
They will be run in the order as shown here.\")\n\n actiongroup = {\n \"state\": (\"State information (tcp, ssh, nodestate, power).\"\n \"This will run instantly (on the master), no test-run here\", None, \"store_true\", False, 's'),\n \"setoffline\": (\"Set the selected nodes offline\", None, \"store_true\", False),\n \"setonline\": (\"Set the selected nodes online\", None, \"store_true\", False),\n \"pbsmomcleanup\": (\"Run pbs cleanupscripts\", None, \"store_true\", False),\n \"pbsmomstop\": (\"Stop pbs_mom on selected nodes\", None, \"store_true\", False),\n \"runcmd\": (\"Run a (bash) command on the selected nodes\", \"string\", \"store\", None),\n \"pbsmomstatus\": (\"gives the status of pbsmom on the selected nodes\", None, \"store_true\", False),\n \"poweroff\": (\"Power off the selected nodes in a clean way\", None, \"store_true\", False),\n \"powercut\": (\"Power off the selected nodes as soon as possible\", None, \"store_true\", False),\n \"poweron\": (\"Power on the selected nodes\", None, \"store_true\", False),\n \"reboot\": (\"Reboot the selected nodes in a clean way\", None, \"store_true\", False),\n \"hardreboot\": (\"Power cycle the selected nodes\", None, \"store_true\", False),\n \"pbsmomrestart\": (\"Restart pbs_mom on the selected nodes\", None, \"store_true\", False),\n \"ledon\": (\"Turn on the locator led of the selected nodes\", None, \"store_true\", False),\n \"ledoff\": (\"Turn off the locator led of the selected nodes\", None, \"store_true\", False),\n \"fix-downonerror\": (\"Fix the down on error status (for the selected workernodes)\", None, \"store_true\", False),\n \"co\": (\"Run a quattor component on the selected nodes, e.g. spma,cron\", \"string\", \"store\", None),\n }\n self.add_group_parser(actiongroup, descr)\n\n def clusteraction_options(self):\n \"\"\"Set the clusteaction options\"\"\"\n\n # actions on a cluster\n descr = (\"Cluster actions\", \"Use these to select the specific action you want to run on the selected cluster. 
These are run in the order as listed here\")\n clusteractiongroup = {\n \"pause\": (\"Pause the scheduler\", None, \"store_true\", False, 'p'),\n \"resume\": (\"Resume the scheduler\", None, \"store_true\", False, 'r'),\n \"restart\": (\"Restart the scheduler, Warning: will pass all jobs through GOLD before 1st rescheduling,\"\n \"--force needed\", None, \"store_true\", False),\n }\n self.add_group_parser(clusteractiongroup, descr)\n\n def monitoring_options(self):\n \"\"\"Set the monitoring options\"\"\"\n # actions involving monitoring\n descr = (\"Monitoring options\", \"Use these to enable notification of the monitoring service\")\n monitoringgroup = {\n \"ack\": (\"Acknowledge a problem with all selected nodes.\", None, \"store_true\", False),\n \"ack-service\": (\"Acknowledge a problem with a service on all selected nodes\", \"string\", \"store\", None),\n \"downtime\": (\"Schedule all selected nodes and it's services for a downtime (in hours)\", \"string\", \"store\", None),\n \"comment\": (\"Set the comment for the acknowledgement or scheduled downtime\", \"string\", \"store\", None),\n \"imms\": (\"also select the imms of the selected nodes, this is only used for acknowleding problems to monitoring\",\n None, \"store_true\", False),\n }\n self.add_group_parser(monitoringgroup, descr)\n\n\n# the options needs to be parsed before get_config can be successfully ran\nparseoptions()\n","repo_name":"pombredanne/vsc-manage","sub_path":"lib/vsc/manage/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":11195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12075912605","text":"#!/usr/bin/env python3\n\nimport struct\nimport uuid\nimport hashlib\nimport enum\nimport re\nfrom typing import Tuple\n#from .efivar import efiDevicePath, efiInitialize\n\n# ########################################\n# convert byte buffers with null terminated C strings to python strings\n# ########################################\n\ndef nullterm8 (buffer: bytes) -> str:\n return buffer.decode('utf-8').split('\\x00')[0]\n\ndef nullterm16 (buffer: bytes) -> str:\n return buffer.decode('utf-16').split('\\u0000')[0]\n\n# ########################################\n# enumeration of all event types\n# ########################################\n\nclass Event(enum.IntEnum):\n EV_PREBOOT_CERT = 0x0\n EV_POST_CODE = 0x1\n EV_UNUSED = 0x2\n EV_NO_ACTION = 0x3\n EV_SEPARATOR = 0x4\n EV_ACTION = 0x5\n EV_EVENT_TAG = 0x6\n EV_S_CRTM_CONTENTS = 0x7\n EV_S_CRTM_VERSION = 0x8\n EV_CPU_MICROCODE = 0x9\n EV_PLATFORM_CONFIG_FLAGS = 0xa\n EV_TABLE_OF_DEVICES = 0xb\n EV_COMPACT_HASH = 0xc\n EV_IPL = 0xd\n EV_IPL_PARTITION_DATA = 0xe\n EV_NONHOST_CODE = 0xf\n EV_NONHOST_CONFIG = 0x10\n EV_NONHOST_INFO = 0x11\n EV_OMIT_BOOT_DEVICE_EVENTS = 0x12\n EV_EFI_EVENT_BASE = 0x80000000\n EV_EFI_VARIABLE_DRIVER_CONFIG = EV_EFI_EVENT_BASE + 0x1\n EV_EFI_VARIABLE_BOOT = EV_EFI_EVENT_BASE + 0x2\n EV_EFI_BOOT_SERVICES_APPLICATION = EV_EFI_EVENT_BASE + 0x3\n EV_EFI_BOOT_SERVICES_DRIVER = EV_EFI_EVENT_BASE + 0x4\n EV_EFI_RUNTIME_SERVICES_DRIVER = EV_EFI_EVENT_BASE + 0x5\n EV_EFI_GPT_EVENT = EV_EFI_EVENT_BASE + 0x6\n EV_EFI_ACTION = EV_EFI_EVENT_BASE + 0x7\n EV_EFI_PLATFORM_FIRMWARE_BLOB = EV_EFI_EVENT_BASE + 0x8\n EV_EFI_HANDOFF_TABLES = EV_EFI_EVENT_BASE + 0x9\n EV_EFI_PLATFORM_FIRMWARE_BLOB2 = EV_EFI_EVENT_BASE + 0xa\n EV_EFI_HANDOFF_TABLES2 = EV_EFI_EVENT_BASE + 0xb\n EV_EFI_VARIABLE_BOOT2 = EV_EFI_EVENT_BASE + 0xc\n EV_EFI_VARIABLE_AUTHORITY = EV_EFI_EVENT_BASE + 0xe0\n\n 
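# Catch-all used by this parser for event types missing from the TCG registry (see GenericEvent.__init__).\n    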
EV_UNKNOWN = 0xFFFFFFFF\n\n# ########################################\n# enumeration of event digest algorithms\n# TPM2_ALG_ from TCG algorithm registry\n#\n# NOTE The overlap with python's hashlib is low, but\n# it apparently covers most use cases ...\n# ########################################\n\nclass Digest (enum.IntEnum):\n sha1 = 4\n sha256 = 11\n sha384 = 12\n sha512 = 13\n sha3_224 = 0x27\n sha3_256 = 0x28\n sha3_512 = 0x29\n\n# ########################################\n# Event digests\n# TCG PC Client platform firmware profile, TPML_DIGEST_VALUES, Section 10.2.2\n# ########################################\n\nclass EfiEventDigest:\n hashalgmap={\n Digest.sha1: hashlib.sha1,\n Digest.sha256: hashlib.sha256,\n Digest.sha384: hashlib.sha384,\n Digest.sha512: hashlib.sha512\n }\n\n # constructor for a digest\n def __init__(self, algid: int, buffer: bytes, idx: int):\n self.algid = Digest(algid)\n assert self.algid in EfiEventDigest.hashalgmap\n self.hashalg = EfiEventDigest.hashalgmap[self.algid]()\n self.digest_size = self.hashalg.digest_size\n self.digest = buffer[idx:idx+self.digest_size]\n\n # JSON converter -- returns something that can be encoded as JSON\n def toJson(self):\n return { 'AlgorithmId': self.algid.name, 'Digest': self.digest.hex() }\n\n # ----------------------------------------\n # parse a list of digests in the event log\n # ----------------------------------------\n # inputs:\n # digestcount: how many digests to parse\n # idx: index in the buffer we are parsing\n # buffer: input buffer we are parsing\n # outputs:\n # idx: index of first unparsed byte in buffer\n # digests: list of parsed digests\n\n @classmethod\n def parselist (cls, digestcount:int, buffer: bytes, idx: int) -> Tuple[dict, int]:\n digests = {}\n for _ in range(0,digestcount):\n (algid,)=struct.unpack('= idx + self.evsize, f'Event log truncated, GenericEvent, evt.idx = {self.evidx}'\n self.evbuf = buffer[idx:idx+self.evsize]\n\n try:\n self.evtype = Event(eventheader[0])\n except Exception as _:\n self.evtype = Event.EV_UNKNOWN\n\n @classmethod\n def Parse(cls, eventheader: Tuple[int, int, dict, int, int], buffer: bytes, idx: int):\n return cls(eventheader, buffer, idx)\n\n # pylint: disable=no-self-use\n def validate (self) -> Tuple[bool,bool,str]:\n return True,True,''\n\n def toJson (self):\n return {\n 'EventType': self.evtype.name,\n 'EventNum': self.evidx,\n 'PCRIndex': self.evpcr,\n 'EventSize': self.evsize,\n 'DigestCount': len(self.digests),\n 'Digests': list(map(lambda o: o[1], self.digests.items())),\n 'Event': self.evbuf[:1024].hex()\n }\n\n# ########################################\n# Events that can be validated by CONTENT_MATCHES_DIGEST\n# TCG Guidance on Integrity Measurements and Event Log Processing, V1, Rev 0.118, 12/15/2021, Section 7.2.5.1\n# EV_S_CRTM_VERSION\n# EV_EFI_VARIABLE_DRIVER_CONFIG\n# EV_SEPARATOR\n# EV_EFI_GPT_EVENT\n# EV_EFI_VARIABLE_BOOT\n# ########################################\n\nclass ValidatedEvent (GenericEvent):\n def validate(self) -> Tuple[bool,bool,str]:\n for algid in self.digests.keys():\n refdigest = self.digests[algid].digest\n calchash1 = EfiEventDigest.hashalgmap[algid](self.evbuf).digest()\n if refdigest != calchash1:\n return False,False,str(self.evtype.name)\n return False,True,''\n\n# ########################################\n# POST CODE event -- interpreted as a string\n# or else interpreted as a blob base/lenth pair\n# ########################################\n\nclass PostCodeEvent (GenericEvent):\n def __init__ (self, eventheader: Tuple, 
buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n if self.evsize == 16:\n (self.blobBase, self.blobLength) = struct.unpack(' dict:\n if self.blobBase is not None:\n evt = { 'BlobBase': self.blobBase, 'BlobLength': self.blobLength }\n else:\n evt = self.evbuf.decode('utf-8')\n return {\n ** super().toJson(),\n 'Event': evt\n }\n\n# ########################################\n# Event type: firmware blob measurement\n# ########################################\n\nclass FirmwareBlobEvent (GenericEvent):\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n (self.base,self.length)=struct.unpack(' dict:\n return { ** super().toJson(), 'Event': {\n 'BlobBase': self.base,\n 'BlobLength': self.length\n }}\n\n\n# ########################################\n# EFI IPL event. Used during initial program load.\n# ########################################\n# Differs from generic events in that the body of the\n# event is a zero terminated UTF-8 string describing\n# what is being loaded.\n#\n# NOTE there is a confusion about zero termination,\n# and this puts correct processing into doubt.\n# tpm2_eventlog is known to mess up and generate\n# non-UTF-8 YAML in certain cases, which yq then chokes on.\n# ########################################\n\nclass EfiIPLEvent (GenericEvent):\n def toJson (self) -> dict:\n return {\n ** super().toJson(),\n 'Event': { 'String': nullterm8(self.evbuf[:-1]) }\n }\n\n# ########################################\n# Spec ID Event\n# ########################################\n# This is the first event in the log, and gets a lot of\n# special processing.\n# ########################################\n\nclass SpecIdEvent (GenericEvent):\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n# self.signature = uuid.UUID(bytes_le=buffer[idx:idx+16])\n (self.signature, self.platformClass, self.specVersionMinor, self.specVersionMajor,\n self.specErrata, self.uintnSize, self.numberOfAlgorithms) = struct.unpack('<16sIBBBBI', buffer[idx+0:idx+28])\n idx += 28\n self.alglist = []\n for x in range(0, self.numberOfAlgorithms):\n (algid, digsize) = struct.unpack(' 0:\n j['SpecID'][0]['vendorInfo'] = self.vendorInfo.decode('utf-8')\n return j\n\n# ########################################\n# Event type: EFI variable measurement\n# ########################################\n\nclass EfiVarEvent (ValidatedEvent):\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n self.guid = uuid.UUID(bytes_le=buffer[idx:idx+16])\n self.gg = buffer[idx:idx+16]\n (self.namelen,self.datalen) = struct.unpack(' dict:\n return { ** super().toJson(),\n 'Event': {\n 'UnicodeName' : self.name.decode('utf-16'),\n 'UnicodeNameLength': self.namelen,\n 'VariableDataLength': self.datalen,\n 'VariableName': str(self.guid),\n 'VariableData': self.data.hex()\n }}\n\n# ########################################\n# EFI variable authority event (EV_EFI_VARIABLE_AUTHORITY).\n# Contains a single signature, a boolean or a string.\n# Booleans are easy to find (datalen==1)\n# It is unclear what general rule decides between strings and signatures.\n# ########################################\n\nclass EfiVarAuthEvent(EfiVarEvent):\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n self.sigdata = EfiSignatureData(self.data, self.datalen, 0)\n\n @classmethod\n def Parse(cls, eventheader: 
Tuple, buffer: bytes, idx: int):\n (namelen,datalen) = struct.unpack(' dict:\n j = super().toJson()\n j['Event']['VariableData'] = [ self.sigdata ]\n return j\n\n # signature data are not subject to validation.\n def validate(self) -> Tuple[bool, bool, str]:\n return True, True, ''\n\n# ########################################\n# Boolean variable readout event\n# ########################################\n\nclass EfiVarBooleanEvent(EfiVarEvent):\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n (self.enabled,) = struct.unpack(' dict:\n j = super().toJson()\n j['Event']['VariableData'] = { 'Enabled' : 'Yes' if self.enabled else 'No' }\n return j\n\n# ########################################\n# String event\n# ########################################\n\nclass EfiVarStringEvent(EfiVarEvent):\n def toJson (self) -> dict:\n j = super().toJson()\n j['Event']['VariableData'] = { 'String' : self.data.decode('utf-8') }\n return j\n\n# ########################################\n# Hex event\n# ########################################\n\nclass EfiVarHexEvent(EfiVarEvent):\n def toJson (self) -> dict:\n j = super().toJson()\n j['Event']['VariableData'] = self.data.hex()\n return j\n\n# ########################################\n# EFI variable: boot entry\n# ########################################\n\nclass EfiVarBootEvent (EfiVarEvent):\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n # EFI_LOAD_OPTION, https://dox.ipxe.org/UefiSpec_8h_source.html, line 2069\n (self.attributes, self.filepathlistlength) = struct.unpack(' Tuple[bool,bool,str]:\n for algid in self.digests.keys():\n refdigest = self.digests[algid].digest\n calchash1 = EfiEventDigest.hashalgmap[algid](self.evbuf).digest()\n calchash2 = EfiEventDigest.hashalgmap[algid](self.data).digest()\n if refdigest not in (calchash1, calchash2):\n return False,False,str(self.name.decode('utf-16'))\n return False,True,''\n\n def toJson (self) -> dict:\n j = super().toJson()\n j['Event']['VariableData'] = {\n 'Enabled' : 'Yes' if (self.attributes & 1) == 1 else 'No',\n 'FilePathListLength': self.filepathlistlength,\n 'Description': self.description.decode('utf-16'),\n 'DevicePath': self.devicePath\n }\n return j\n\n# ########################################\n# EFI variable: Boot order\n# ########################################\n\nclass EfiVarBootOrderEvent(EfiVarEvent):\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n assert (self.datalen % 2) == 0\n self.bootorder = struct.unpack(f'<{self.datalen//2}H', self.data)\n\n def toJson (self) -> dict:\n j = super().toJson()\n j['Event']['VariableData'] = list(map(lambda a: f'Boot{a:04x}', self.bootorder))\n return j\n\n # the published digest can match the entire event buffer or just the data portion (without the name).\n def validate(self) -> Tuple[bool,bool,str]:\n for algid in self.digests.keys():\n refdigest = self.digests[algid].digest\n calchash1 = EfiEventDigest.hashalgmap[algid](self.evbuf).digest()\n calchash2 = EfiEventDigest.hashalgmap[algid](self.data).digest()\n if refdigest not in (calchash1, calchash2):\n return False,False,str(self.name.decode('utf-16'))\n return False,True,''\n\n# ########################################\n# EFI signature event: an EFI variable event for secure boot variables.\n# ########################################\n\nclass EfiSignatureListEvent(EfiVarEvent):\n def __init__ (self, 
eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n idx2 = 0\n self.varlist = []\n while idx2 < self.datalen:\n var = EfiSignatureList (self.data, idx2)\n idx2 += var.listsize\n self.varlist.append(var)\n\n def toJson (self) -> dict:\n j = super().toJson()\n if len(self.varlist) == 0:\n j['Event']['VariableData'] = None\n else:\n j['Event']['VariableData'] = self.varlist\n return j\n\n# ########################################\n# A list of EFI signatures\n# ########################################\n# UEFI Spec 2.88 Section 32.4.1, EFI_SIGNATURE_LIST\n\nclass EfiSignatureList:\n def __init__ (self, buffer, idx):\n self.sigtype = uuid.UUID(bytes_le=buffer[idx:idx+16])\n (self.listsize, self.hsize, self.sigsize) = struct.unpack(' dict:\n return {\n 'SignatureType': str(self.sigtype),\n 'SignatureHeaderSize': self.hsize,\n 'SignatureListSize': self.listsize,\n 'SignatureSize': self.sigsize,\n 'Keys': self.keys\n }\n\n\n# ########################################\n# An EFI signature\n# ########################################\n# UEFI Spec 2.88 Section 32.4.1, EFI_SIGNATURE_DATA\n\nclass EfiSignatureData:\n def __init__ (self, buffer: bytes, sigsize, idx):\n assert len(buffer) >= 16, f'EFI signature truncated, expected 16, found {len(buffer)} bytes'\n self.owner = uuid.UUID(bytes_le=buffer[idx:idx+16])\n self.sigdata = buffer[idx+16:idx+sigsize]\n\n def toJson (self) -> dict:\n return {\n 'SignatureOwner': str(self.owner),\n 'SignatureData': self.sigdata.hex()\n }\n\n# ########################################\n# EFI action event\n# ########################################\n\nclass EfiActionEvent (GenericEvent):\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n self.event = buffer[idx:idx+self.evsize]\n def toJson (self) -> dict:\n return { ** super().toJson(), 'Event': self.event.decode('utf-8') }\n\n\n# ########################################\n# EFI GPT event (a GPT partition table description event)\n# ########################################\n\nclass EfiGPTEvent (ValidatedEvent):\n # Embedded class: GPT Partition header, UEFI Spec version 2.88 Errata B Section 5.3.2 Table 21\n class GPTPartHeader:\n def __init__ (self, buffer, idx):\n (self.signature, self.revision, self.headerSize,\n self.headerCRC32, _, self.MyLBA, self.alternateLBA,\n self.firstUsableLBA, self.lastUsableLBA, guidbytes,\n self.partitionEntryLBA, self.numPartitionEntries, self.sizeOfPartitionEntry,\n self.partitionEntryArrayCRC) = struct.unpack('<8sIIIIQQQQ16sQIII', buffer[idx:idx+92])\n self.diskGuid = uuid.UUID(bytes_le=guidbytes)\n\n def toJson (self) -> dict:\n return {\n 'Signature' : self.signature.decode('utf-8'),\n 'Revision' : self.revision,\n 'HeaderSize' : self.headerSize,\n 'HeaderCRC32' : self.headerCRC32,\n 'MyLBA' : self.MyLBA,\n 'AlternateLBA' : self.alternateLBA,\n 'FirstUsableLBA' : self.firstUsableLBA,\n 'LastUsableLBA' : self.lastUsableLBA,\n 'DiskGUID' : str(self.diskGuid),\n 'PartitionEntryLBA' : self.partitionEntryLBA,\n 'NumberOfPartitionEntry' : self.numPartitionEntries,\n 'SizeOfPartitionEntry' : self.sizeOfPartitionEntry,\n 'PartitionEntryArrayCRC32': self.partitionEntryArrayCRC\n }\n\n # Embedded class: GPT Partition entry, UEFI Spec version 2.88 Errata B Section 5.3.3 Table 22\n class GPTPartEntry:\n def __init__(self, buffer, idx):\n self.partitionTypeGUID = uuid.UUID(bytes_le=buffer[idx:idx+16])\n self.uniquePartitionGUID = uuid.UUID(bytes_le=buffer[idx+16:idx+32])\n (self.startingLBA, 
self.endingLBA,\n self.attributes, self.partitionName) = struct.unpack(' dict:\n return {\n 'PartitionTypeGUID' : str(self.partitionTypeGUID),\n 'UniquePartitionGUID' : str(self.uniquePartitionGUID),\n 'Attributes' : self.attributes,\n 'StartingLBA' : self.startingLBA,\n 'EndingLBA' : self.endingLBA,\n 'PartitionName' : nullterm16(self.partitionName)\n }\n\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n self.gptheader = self.GPTPartHeader(buffer, idx)\n idx += self.gptheader.headerSize\n (self.numparts,) = struct.unpack(' dict:\n return { ** super().toJson(),\n 'Event': {\n 'Header': self.gptheader.toJson(),\n 'NumberOfPartitions': self.numparts,\n 'Partitions': self.partitions\n }}\n\n\n# ########################################\n# Event type: uefi image load\n# TCG PC Client platform firmware profile, UEFI_IMAGE_LOAD_EVENT, Section 10.2.3\n# ########################################\n\nclass UefiImageLoadEvent (GenericEvent):\n def __init__ (self, eventheader: Tuple, buffer: bytes, idx: int):\n super().__init__(eventheader, buffer, idx)\n (self.addrinmem,self.lengthinmem,self.linktimeaddr,self.lengthofdevpath)=struct.unpack(' dict:\n j = super().toJson()\n j['Event'] = {\n 'ImageLocationInMemory': self.addrinmem,\n 'ImageLengthInMemory': self.lengthinmem,\n 'ImageLinkTimeAddress': self.linktimeaddr,\n 'LengthOfDevicePath': self.lengthofdevpath,\n 'DevicePath': str(self.devpath)\n }\n return j\n\n\n# TCG PC Client Specific Implementation Specification for Conventional BIOS\n\n# ########################################\n# Event Log is really a list of events.\n# ########################################\n# We are adding a number of class methods to help parse events.\n# The constructor, when invoked on a buffer, performs the parsing.\n# ########################################\n\nclass EventLog(list):\n def __init__ (self, buffer: bytes, buflen: int):\n# efiInitialize()\n list.__init__(self)\n self.buflen = buflen\n evt, idx = EventLog.Parse_1stevent(buffer, 0)\n self.append(evt)\n evidx = 1\n while idx < buflen:\n evt, idx = EventLog.Parse_event(evidx, buffer, idx)\n self.append(evt)\n evidx += 1\n\n # parser for 1st event\n # TCG PC client platform firmware profile spec, structure: TCG_PCClientPCREvent, Section 10.2.1\n @classmethod\n def Parse_1stevent(cls, buffer: bytes, idx: int) -> Tuple[GenericEvent, int]:\n (evpcr, evtype, digestbuf, evsize)=struct.unpack(' Tuple[GenericEvent, int]:\n (evpcr, evtype, digestcount)=struct.unpack(' dict:\n algid=Digest.sha1\n d0 = EfiEventDigest.hashalgmap[algid]()\n pcrs = {}\n for event in self:\n if event.evtype == 3:\n continue # do not measure NoAction events\n pcridx = event.evpcr\n oldpcr = pcrs[pcridx] if pcridx in pcrs else bytes(d0.digest_size)\n extdata = event.digests[algid].digest\n newpcr = EfiEventDigest.hashalgmap[algid](oldpcr+extdata).digest()\n pcrs[pcridx] = newpcr\n return pcrs\n\n # run validation on all events\n def validate (self) -> list[list[Tuple]]:\n pass_list = []\n fail_list = []\n vac_list = []\n for evt in self:\n vacuous, passed, why = evt.validate()\n if vacuous:\n vac_list.append((evt.evidx, evt.evtype.name, type(evt)))\n elif passed:\n pass_list.append((evt.evidx, evt.evtype.name, type(evt)))\n else:\n fail_list.append((evt.evidx, evt.evtype.name, type(evt), why))\n return [vac_list, pass_list, 
fail_list]\n","repo_name":"galmasi/python3-uefi-eventlog","sub_path":"eventlog/eventlog.py","file_name":"eventlog.py","file_ext":"py","file_size_in_byte":31147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"6318718120","text":"from Ball import Ball\nfrom Physics import ConstantElectromagneticField, Charge, Drag\nfrom Simulation import Simulation\n\nnum_balls = 10\nballs = [Ball() for i in range(num_balls)]\nfor i, b in enumerate(balls):\n    b.charge = -1.0e-5\n    b.velocity[0] = 1.0\n    b.position[1] = i * 2.0\n    b.radius = 0.5\n\nphysics = [ConstantElectromagneticField(B=1.0e5),\n           Drag(quadratic=0.01),\n           Charge()]\n\nlim = num_balls * 2.0 + 1.0\nsimulation = Simulation(balls, physics,\n                        limits=[[-lim, lim],\n                                [-0.5 * lim, 1.5 * lim]])\nsimulation.time_step = 0.2\nsimulation.num_time_steps = 1001\nsimulation.max_dv = 1.0e10\nsimulation.visualization_step = 5\n\nsimulation.run()\n","repo_name":"brbass/ball_physics","sub_path":"MagneticRotation.py","file_name":"MagneticRotation.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"36888798096","text":"# It will loop through the whole array to find the desired number\n# It may find the item at the first, middle or last position\n# But in the worst case it has to check the last item of the array\n\n# So the number of loop operations depends on the length of the array\n# That's why the time complexity is O(n)\n\n# Space complexity is O(1), because it is constant and does not depend on n.\n\n\narr = [385, 25, 39, 57, 285, 93, 24]\n\ndef linear_search(inputArr, searchItem):\n    for i in range(0,len(inputArr)):\n        if(inputArr[i] == searchItem):\n            return i\n    return -1\n\n\nresult = linear_search(arr, 93)\n\nif result == -1:\n    print(\"Item not found\")\nelse:\n    print(\"Item's index is: \" + str(result))","repo_name":"ShrikantaMazumder/alogorithms-and-data-structure","sub_path":"1.searching_algorithm/linear_search.py","file_name":"linear_search.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} {"seq_id":"71595466506","text":"from app.auth import auth\nfrom flask import render_template, redirect, url_for, flash, request\nfrom app.models import User, db\nfrom app.auth.forms import LoginForm, RegisterForm, ValidationError\nfrom flask_login import login_user, logout_user, login_required\n\n\n@auth.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n    form = LoginForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(email=form.email.data).first()\n        if user is not None and user.verify_password(form.password.data):\n            login_user(user, form.remember_btn.data)\n            '''login_user registers the user in the session; the second argument remembers this user even after the site is reloaded\n            when the checkbox is True, otherwise the value is False\n            '''\n            if request.args.get(\"next\"):\n                return redirect(request.args.get(\"next\"))\n            '''request arguments: if there is a next argument, redirect to next; if there is none,\n            redirect to the main page'''\n            return redirect(url_for(\"main.index\"))\n        flash(\"Invalid login or password\")\n    return render_template(\"auth/login.html\", form=form)\n    \"in this case the user's session is preserved, and the True value stores the data in the cookies\" \\\n    \"of the user's browser, so they can sign back in to the site with their saved data, which\" \\\n    \"makes it possible to prevent an interrupted session\"
предотвратить прерванный сеанс\"\n\n\n@auth.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n flash(\"Вы вышли со своего аккаунта\")\n return redirect(url_for(\"main.index\"))\n\n\n@auth.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n form = RegisterForm()\n if form.validate_on_submit():\n try:\n form.validate_username(form.username)\n form.validate_email(form.email)\n user = User(email=form.email.data,\n username=form.username.data,\n password=form.password.data)\n db.session.add(user)\n flash(\"Вы успешно зарегистрировались\")\n return redirect(url_for(\"auth.login\"))\n except ValidationError:\n return redirect(url_for(\"auth.register\"))\n\n return render_template(\"auth/register.html\", form=form)\n","repo_name":"LewiysArito/website","sub_path":"Project_8/app/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22560483418","text":"from pathlib import Path\nfrom subprocess import check_call\n\nfrom git_graph_branch.git import Branch, Commit\n\nfrom .utils import git_test_commit\n\n\ndef test_simple_names(repo: Path) -> None:\n commit = git_test_commit()\n check_call([\"git\", \"checkout\", \"main\", \"-b\", \"feature\"])\n\n assert Branch(\"main\").commit == Commit(commit)\n assert Branch(\"feature\").commit == Commit(commit)\n\n\ndef test_hash_in_name(repo: Path) -> None:\n commit = git_test_commit()\n check_call([\"git\", \"checkout\", \"main\", \"-b\", \"foo#bar\"])\n assert Branch(\"foo#bar\").commit == Commit(commit)\n\n\ndef test_quote_in_name(repo: Path) -> None:\n commit = git_test_commit()\n check_call([\"git\", \"checkout\", \"main\", \"-b\", 'foo\"bar'])\n assert Branch('foo\"bar').commit == Commit(commit)\n\n\ndef test_slash_in_name(repo: Path) -> None:\n commit = git_test_commit()\n check_call([\"git\", \"checkout\", \"main\", \"-b\", \"bug/101\"])\n assert Branch(\"bug/101\").commit == Commit(commit)\n","repo_name":"alicederyn/git-graph-branch","sub_path":"test/unit/git/test_branch_commit.py","file_name":"test_branch_commit.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23841138707","text":"import copy\ninstructions = []\nline_number = 0\nnop_jmp = []\n\nwith open(\"input.txt\", \"r\") as fp:\n for line in fp.readlines():\n if line != '\\n':\n instr = line.split(' ')\n instructions.append({\n 'code': instr[0],\n 'value': int(instr[1]),\n })\n if instructions[-1]['code'] != 'acc':\n nop_jmp.append(line_number)\n line_number += 1\n\ndef brute_force(instructions):\n\n loop = False\n visited = {}\n\n pointer = 0\n accumulator = 0\n while pointer < len(instructions):\n\n if pointer in visited:\n loop = True\n break\n \n if instructions[pointer]['code'] == 'nop':\n next_instr = pointer + 1\n if instructions[pointer]['code'] == 'jmp':\n next_instr = pointer + instructions[pointer]['value']\n if instructions[pointer]['code'] == 'acc':\n accumulator += instructions[pointer]['value']\n next_instr = pointer + 1\n\n visited[pointer] = 1\n\n pointer = next_instr\n\n if not loop:\n print(accumulator)\n\nfor nj in nop_jmp:\n instr = copy.deepcopy(instructions)\n instr[nj]['code'] = 'nop' if instr[nj]['code'] == 'jmp' else 'jmp'\n 
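    # Try the program with this single instruction flipped; only the repaired variant terminates and prints its accumulator.\n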
brute_force(instr)\n","repo_name":"samdubusc/adventofcode","sub_path":"2020/08/day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43225555692","text":"'''Plugin provides arbitrary binding of uzbl events to uzbl commands.\n\nFormatting options:\n %s = space separated string of the arguments\n %r = escaped and quoted version of %s\n %1 = argument 1\n %2 = argument 2\n %n = argument n\n\nUsage:\n event ON_EVENT LINK_HOVER set selected_uri $1\n --> LINK_HOVER http://uzbl.org/\n <-- set selected_uri http://uzbl.org/\n\n event ON_EVENT CONFIG_CHANGED print Config changed: %1 %2\n --> CONFIG_CHANGED selected_uri http://uzbl.org/\n <-- print Config changed: selected_uri http://uzbl.org/\n'''\n\nimport fnmatch\nfrom functools import partial\n\nfrom uzbl.arguments import splitquoted\nfrom .cmd_expand import send_user_command\nfrom uzbl.ext import PerInstancePlugin\n\n\ndef match_args(pattern, args):\n if len(pattern) > len(args):\n return False\n for p, a in zip(pattern, args):\n if not fnmatch.fnmatch(a, p):\n return False\n return True\n\n\nclass OnEventPlugin(PerInstancePlugin):\n CONFIG_SECTION = 'on_event'\n\n def __init__(self, uzbl):\n '''Export functions and connect handlers to events.'''\n super(OnEventPlugin, self).__init__(uzbl)\n\n self.events = {}\n\n uzbl.connect('ON_EVENT', self.parse_on_event)\n\n def event_handler(self, *args, **kargs):\n '''This function handles all the events being watched by various\n on_event definitions and responds accordingly.'''\n\n # Could be connected to a EM internal event that can use anything as\n # arguments\n if len(args) == 1 and isinstance(args[0], str):\n args = splitquoted(args[0])\n\n event = kargs['on_event']\n if event not in self.events:\n return\n\n commands = self.events[event]\n for cmd, pattern in commands:\n if not pattern or match_args(pattern, args):\n send_user_command(self.uzbl, cmd, args)\n\n def on_event(self, event, pattern, cmd):\n '''Add a new event to watch and respond to.'''\n\n event = event.upper()\n self.logger.debug('new event handler %r %r %r', event, pattern, cmd)\n if event not in self.events:\n self.uzbl.connect(event,\n partial(self.event_handler, on_event=event))\n self.events[event] = []\n\n if isinstance(cmd, str):\n cmd = (cmd,)\n\n cmds = self.events[event]\n if cmd not in cmds:\n cmds.append((cmd, pattern))\n\n def parse_on_event(self, args):\n '''Parse ON_EVENT events and pass them to the on_event function.\n\n Syntax: \"event ON_EVENT <[ pattern ]> commands\".'''\n\n args = splitquoted(args)\n assert args, 'missing on event arguments'\n\n # split into event name, optional argument pattern and command\n event = args[0]\n pattern = []\n if args[1] == '[':\n for i, arg in enumerate(args[2:]):\n if arg == ']':\n break\n pattern.append(arg)\n command = tuple(args[3+i:])\n else:\n command = tuple(args[1:])\n\n assert event and command, 'missing on event command'\n self.on_event(event, pattern, command)\n\n def cleanup(self):\n self.events.clear()\n super(OnEventPlugin, self).cleanup()\n","repo_name":"uzbl/uzbl","sub_path":"uzbl/plugins/on_event.py","file_name":"on_event.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":996,"dataset":"github-code","pt":"81"} +{"seq_id":"41095196830","text":"\r\n\r\nimport wx\r\n\r\nif wx.Platform == '__WXMSW__':\r\n from wx.lib.pdfwin import PDFWindow\r\n\r\n\r\nclass MyPanel(wx.Panel):\r\n def 
__init__(self, parent):\r\n\r\n        self.parent = parent\r\n\r\n        wx.Panel.__init__(self, parent, id=-1)\r\n        self.pdf = None\r\n\r\n        sizer = wx.BoxSizer(wx.VERTICAL)\r\n        btnSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n\r\n        self.pdf = PDFWindow(self, style=wx.SUNKEN_BORDER)\r\n\r\n        sizer.Add(self.pdf, proportion=1, flag=wx.EXPAND)\r\n\r\n        btn = wx.Button(self, wx.NewId(), \"Open PDF File\")\r\n        self.Bind(wx.EVT_BUTTON, self.OnOpenButton, btn)\r\n        btnSizer.Add(btn, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)\r\n\r\n        btn = wx.Button(self, wx.NewId(), \"Previous Page\")\r\n        self.Bind(wx.EVT_BUTTON, self.OnPrevPageButton, btn)\r\n        btnSizer.Add(btn, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)\r\n\r\n        btn = wx.Button(self, wx.NewId(), \"Next Page\")\r\n        self.Bind(wx.EVT_BUTTON, self.OnNextPageButton, btn)\r\n        btnSizer.Add(btn, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)\r\n\r\n        btnSizer.Add((50,-1), proportion=2, flag=wx.EXPAND)\r\n        sizer.Add(btnSizer, proportion=0, flag=wx.EXPAND)\r\n\r\n        self.SetSizer(sizer)\r\n        self.SetAutoLayout(True)\r\n\r\n    def OnOpenButton(self, event):\r\n        # make sure you have PDF files available on your drive\r\n        dlg = wx.FileDialog(self, wildcard=\"*.pdf\")\r\n        if dlg.ShowModal() == wx.ID_OK:\r\n            wx.BeginBusyCursor()\r\n            self.pdf.LoadFile(dlg.GetPath())\r\n            wx.EndBusyCursor()\r\n        dlg.Destroy()\r\n\r\n    def OnPrevPageButton(self, event):\r\n        self.pdf.gotoPreviousPage()\r\n\r\n    def OnNextPageButton(self, event):\r\n        self.pdf.gotoNextPage()\r\n\r\n\r\napp = wx.PySimpleApp()\r\n# create window/frame, no parent, -1 is default ID, title, size\r\nframe = wx.Frame(None, -1, \"PDFWindow\", size = (640, 480))\r\n# make an instance of the class\r\nMyPanel(frame)\r\n# show the frame\r\n\r\nframe.Show(True)\r\n\r\n# start the event loop\r\napp.MainLoop()\r\n\r\n","repo_name":"jhkoivis/dvelib","sub_path":"alpha/pdfToBibtex/attic/parseTxt.py","file_name":"parseTxt.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"3516343195","text":"import tensorflow as tf\nfrom transformers import BertTokenizer, TFBertForSequenceClassification\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\n# Load the dataset\ndata = pd.read_csv(\"names_to_train.csv\", header=None)\ndf = pd.DataFrame(data)\n\n# Prepare the data\nnames = data[0].values\nlabels = data[1].values\nnames = names.tolist()\nlabels = labels.tolist()\n\n# Convert labels to numerical values\nlabel_to_int = {\"male\": 0, \"female\": 1}\nlabel_ints = [label_to_int[label] for label in labels]\n\n# Split the dataset into training and validation sets\ntrain_names, val_names, train_labels, val_labels = train_test_split(names, label_ints, test_size=0.2, random_state=42)\n\n# Convert labels and encodings to NumPy arrays with appropriate data types\ntrain_labels = np.array(train_labels, dtype=np.int32)\nval_labels = np.array(val_labels, dtype=np.int32)\n\n# Load the pre-trained BERT model and tokenizer\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel = TFBertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n\n# Tokenize the input names\ntrain_encodings = tokenizer(train_names, truncation=True, padding=True)\nval_encodings = tokenizer(val_names, truncation=True, padding=True)\n\ntrain_encodings = {\n    'input_ids': np.array(train_encodings['input_ids'], dtype=np.int32),\n    'attention_mask': 
np.array(train_encodings['attention_mask'], dtype=np.int32),\n 'token_type_ids': np.array(train_encodings['token_type_ids'], dtype=np.int32)\n}\n\nval_encodings = {\n 'input_ids': np.array(val_encodings['input_ids'], dtype=np.int32),\n 'attention_mask': np.array(val_encodings['attention_mask'], dtype=np.int32),\n 'token_type_ids': np.array(val_encodings['token_type_ids'], dtype=np.int32)\n}\n\n# Create TensorFlow datasets\ntrain_dataset = tf.data.Dataset.from_tensor_slices((\n dict(train_encodings),\n train_labels\n)).batch(16)\n\nval_dataset = tf.data.Dataset.from_tensor_slices((\n dict(val_encodings),\n val_labels\n)).batch(16)\n\n# Fine-tune the BERT model\noptimizer = tf.keras.optimizers.Adam(learning_rate=1e-5)\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\nmetric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')\n\nmodel.compile(optimizer=optimizer, loss=loss, metrics=[metric])\nmodel.fit(train_dataset, epochs=5, validation_data=val_dataset)\n\n# Save the trained model weights\nmodel.save_weights('gender_model_weights.h5')\n\n# Load the trained model\nmodel = TFBertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\nmodel.load_weights('gender_model_weights.h5')\n\n# Function to predict gender based on a name\ndef predict_gender(name):\n # Tokenize the input name\n encoding = tokenizer([name], truncation=True, padding=True)\n\n # Create TensorFlow dataset\n input_dataset = tf.data.Dataset.from_tensor_slices(dict(encoding)).batch(1)\n\n # Make predictions using the trained model\n predictions = model.predict(input_dataset)\n predicted_label = tf.argmax(predictions.logits, axis=1)[0].numpy()\n\n # Map the predicted label to gender\n gender = \"male\" if predicted_label == 0 else \"female\"\n\n return gender\n\n# Get gender from user input\nname = input(\"Enter a name: \")\npredicted_gender = predict_gender(name)\nprint(f\"The predicted gender for the name '{name}' is: {predicted_gender}\")\n","repo_name":"mohi0017/Mohi-s-Bot","sub_path":"code_to_train_model.py","file_name":"code_to_train_model.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28632227680","text":"# import modules\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# create numpy arrays\nnums = np.array([0.5, 0.7, 1.0, 1.2, 1.3, 2.1])\nbins = np.array([0, 1, 2, 3])\n\n# print output\nprint(\"nums: \", nums)\nprint(\"bins: \", bins)\nprint(\"Result:\", np.histogram(nums, bins))\n\n# creating axes labels and title\nplt.xlabel(\"nums\")\nplt.ylabel(\"bins\")\nplt.title(\"Histogram of nums against bins\")\n# create matplotlib histogram\nplt.hist(nums, bins=bins)\n# display histogram\nplt.show()\n\n\n# Write a NumPy program to compute the histogram of nums against the bins. Label your x-axis with nums and y-axis with\n# bins.\n# Add a title to the histogram: Histogram of nums against bins.\n# Sample Output:\n# nums: [0.5 0.7 1. 
1.2 1.3 2.1]\n# bins: [0 1 2 3]\n# Result: (array([2, 3, 1], dtype=int64), array([0, 1, 2, 3]))\n","repo_name":"JamBro22/numpy-exercises","sub_path":"numpy_exercise1.py","file_name":"numpy_exercise1.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21006322470","text":"import os\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'\nimport pygame\nimport logging\nfrom Client import Game, Constants\nfrom Shared.Enums import STAGES\n\ndef game():\n pygame.fastevent.init()\n logging.basicConfig(level=logging.INFO)\n game = Game.Game()\n\n clockObj = pygame.time.Clock()\n while game.gameStage != STAGES.CLOSING or not game.session.properlyClosed:\n for event in pygame.fastevent.get():\n if event.type == pygame.QUIT:\n game.newGameStage(STAGES.CLOSING)\n elif event.type == pygame.KEYDOWN:\n if game.gameStage in [STAGES.WON, STAGES.LOST]:\n game.newGameStage(STAGES.CLOSING)\n elif event.key == pygame.K_r:\n game.rotateShip()\n elif event.key == pygame.K_q:\n game.changeCursor()\n elif event.key == pygame.K_g:\n game.toggleGameReady()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n game.mouseClick(event.pos, rightClick=False)\n elif event.button == 3:\n game.mouseClick(event.pos, rightClick=True)\n elif event.button == 4: # scroll up\n game.changeShipSize(+1)\n elif event.button == 5: # scroll down\n game.changeShipSize(-1)\n \n game.handleRequests()\n game.drawGame()\n clockObj.tick(Constants.FPS)\n\n\nif __name__ == '__main__':\n game()","repo_name":"Michal-Martinek/BattleShips","sub_path":"BattleShips.py","file_name":"BattleShips.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"70172772744","text":"from __future__ import absolute_import, division, print_function\nfrom .config import Config\nfrom .depth_cloud import DepthCloud\nfrom .filters import filter_eigenvalue, filter_depth, filter_grid\nfrom .nearest_neighbors import nearest_neighbors\nfrom .point_cloud import PointCloud\nfrom .utils import timing, trace, normalize\nfrom enum import Enum\nimport numpy as np\nfrom numpy.polynomial import Polynomial\nfrom numpy.lib.recfunctions import structured_to_unstructured\nimport torch\nfrom scipy.spatial import cKDTree\nfrom pytorch3d.ops.knn import knn_points\nimport warnings\n\n\n__all__ = [\n 'batch_loss',\n 'create_loss',\n 'loss_by_name',\n 'min_eigval_loss',\n 'neighbor_cov',\n 'neighbor_fun',\n 'reduce',\n 'trace_loss',\n 'icp_loss',\n 'point_to_point_dist',\n 'point_to_plane_dist'\n]\n\n\nclass Reduction(Enum):\n NONE = 'none'\n MEAN = 'mean'\n SUM = 'sum'\n\n\ndef eigh3_deledalle(mat, normalize=True, sort=True):\n # https://hal.archives-ouvertes.fr/hal-01501221/document\n # (a, d, f), (_, b, e), (_, _, c) = mat\n # a, b, ..., f are N-dimensional tensors.\n a = mat[..., 0, 0]\n b = mat[..., 1, 1]\n c = mat[..., 2, 2]\n d = mat[..., 0, 1]\n e = mat[..., 1, 2]\n f = mat[..., 0, 2]\n\n # (8)\n # N-dimensional tensors\n x1 = a**2 + b**2 + c**2 - a * b - a * c - b * c + 3 * (d**2 + f**2 + e**2)\n x2 = -(2 * a - b - c) * (2 * b - a - c) * (2 * c - a - b) \\\n + 9 * ((2 * c - a - b) * d**2 + (2 * b - a - c) * f**2 + (2 * a - b - c) * e**2) \\\n - 54 * (d * e * f)\n \n # (9)\n # TODO: Convert to mask for individual indices.\n # if x2 > 0:\n # phi = torch.atan(torch.sqrt(4 * x1**3 - x2**2) / x2)\n # elif x2 == 0:\n # phi = torch.pi / 2\n # elif x2 < 0:\n # phi = 
torch.atan(torch.sqrt(4 * x1**3 - x2**2) / x2) + torch.pi\n    # # Use 4-quadrant arctan2 to avoid switch.\n    # N-dimensional tensor\n    phi = torch.atan2(torch.sqrt(4 * x1**3 - x2**2), x2)\n\n    # (7) Eigenvalues\n    # N-dimensional tensors\n    eigval1 = (a + b + c - 2 * torch.sqrt(x1) * torch.cos(phi / 3)) / 3\n    eigval2 = (a + b + c + 2 * torch.sqrt(x1) * torch.cos((phi - torch.pi) / 3)) / 3\n    eigval3 = (a + b + c + 2 * torch.sqrt(x1) * torch.cos((phi + torch.pi) / 3)) / 3\n\n    # (11)\n    # TODO: Handle zero denominators.\n    # N-dimensional tensors\n    m1 = (d * (c - eigval1) - e * f) / (f * (b - eigval1) - d * e)\n    m2 = (d * (c - eigval2) - e * f) / (f * (b - eigval2) - d * e)\n    m3 = (d * (c - eigval3) - e * f) / (f * (b - eigval3) - d * e)\n\n    # (10) Eigenvectors\n    # N-by-3 tensors\n    eigvec1 = torch.stack([(eigval1 - c - e * m1) / f, m1, torch.ones_like(m1)], dim=-1)\n    eigvec2 = torch.stack([(eigval2 - c - e * m2) / f, m2, torch.ones_like(m2)], dim=-1)\n    eigvec3 = torch.stack([(eigval3 - c - e * m3) / f, m3, torch.ones_like(m3)], dim=-1)\n\n    # Stack eigenvalues and vectors to tensors.\n    # N-by-3\n    eigvals = torch.stack([eigval1, eigval2, eigval3], dim=-1)\n    # N-by-3-by-3\n    eigvecs = torch.stack([eigvec1, eigvec2, eigvec3], dim=-1)\n\n    # Normalize eigenvectors.\n    if normalize:\n        eigvecs = eigvecs / torch.linalg.norm(eigvecs, dim=1, keepdim=True)\n\n    # Sort eigenvalues and eigenvectors.\n    if sort:\n        eigvals, ind = torch.sort(eigvals, dim=-1)\n        eigvecs = torch.gather(eigvecs, 2, ind.unsqueeze(1).expand(eigvecs.shape))\n\n    return eigvals, eigvecs\n\n\ndef eigh3(mat):\n    \"\"\"Analytic eigenvalue decomposition of 3-by-3 symmetric matrices.\n\n    Symmetric matrices have real eigenvalues and orthogonal eigenvectors.\n    For real symmetric matrices, the eigenvectors are also real.\n\n    Matrix A = U * diag(L) * U.T,\n    where U is the matrix of eigenvectors and L is the vector of eigenvalues.\n\n    :param mat: ...-by-3-by-3 3D covariance matrices.\n    :return: Eigenvalues and eigenvectors.\n    \"\"\"\n    assert isinstance(mat, torch.Tensor)\n    assert mat.shape[-2] == mat.shape[-1]\n    assert mat.shape[-1] == 3\n\n    return eigh3_deledalle(mat)\n\n\ndef reduce(x, reduction=Reduction.MEAN, weights=None, only_finite=False, skip_nans=False):\n    # assert reduction in ('none', 'mean', 'sum')\n    assert reduction in Reduction\n\n    keep = None\n    if only_finite:\n        keep = x.isfinite()\n    elif skip_nans:\n        keep = ~x.isnan()\n    if keep is not None:\n        # weights may be a tensor, whose truth value is ambiguous; test against None instead\n        if weights is not None:\n            weights = weights[keep]\n        x = x[keep]\n\n    if reduction == Reduction.MEAN:\n        if weights is None:\n            x = x.mean()\n        else:\n            x = (weights * x).sum() / weights.sum()\n    elif reduction == Reduction.SUM:\n        if weights is None:\n            x = x.sum()\n        else:\n            x = (weights * x).sum()\n\n    return x\n\n\ndef neighbor_fun(points, fun, query=None, k=None, r=None):\n    assert isinstance(points, torch.Tensor)\n    assert isinstance(query, torch.Tensor)\n    assert callable(fun)\n    assert k is None or (isinstance(k, int) and k >= 1)\n    assert r is None or (isinstance(r, float) and r > 0.0)\n\n    dist, ind = nearest_neighbors(points, query, k=k, r=r)\n\n    # TODO: Allow batch dimension.\n    # n = query.shape[-1]\n    n = query.shape[0]\n    result = []\n    for i in range(n):\n        nn = torch.index_select(points, 0, torch.tensor(ind[i]))\n        q = query[i:i + 1]\n        result.append(fun(nn, q))\n\n    return result\n\n\ndef neighbor_cov(points, query=None, k=None, r=None, correction=1):\n    fun = lambda p, q: torch.cov(p.transpose(-1, -2), correction=correction)\n    cov = neighbor_fun(points, fun, query=query, k=k, r=r)\n    cov = torch.stack(cov)\n    return 
cov\n\n\ndef batch_loss(loss_fun, clouds, masks=None, offsets=None, reduction=Reduction.MEAN,\n only_finite=False, skip_nans=False,\n **kwargs):\n \"\"\"General batch loss of a sequence of clouds.\n\n :param loss_fun: Loss function.\n :param clouds: Sequence of clouds.\n :param masks: Sequence of masks, optional.\n :param offsets: Sequences of offset clouds, optional.\n :param reduction: Loss reduction mode.\n :param kwargs: Other key-value loss arguments.\n :return: Reduced loss and loss clouds.\n \"\"\"\n assert callable(loss_fun)\n assert isinstance(clouds, (list, tuple))\n if masks is None:\n masks = len(clouds) * [None]\n if offsets is None:\n offsets = len(clouds) * [None]\n assert isinstance(masks, (list, tuple))\n assert len(masks) == len(clouds)\n assert isinstance(offsets, (list, tuple))\n assert len(offsets) == len(clouds)\n\n losses, loss_clouds = [], []\n for cloud, mask, offset in zip(clouds, masks, offsets):\n loss, loss_cloud = loss_fun(cloud, mask=mask, offset=offset, reduction=Reduction.NONE, **kwargs)\n losses.append(loss)\n loss_clouds.append(loss_cloud)\n\n loss = reduce(torch.cat(losses), reduction=reduction,\n only_finite=only_finite, skip_nans=skip_nans)\n return loss, loss_clouds\n\n\ndef min_eigval_loss(cloud, mask=None, offset=None, sqrt=False, normalization=False, reduction=Reduction.MEAN,\n inlier_max_loss=None, inlier_ratio=1.0, inlier_loss_mult=1.0,\n only_finite=False, skip_nans=False, **kwargs):\n \"\"\"Map consistency loss based on the smallest eigenvalue.\n\n Pre-filter cloud before, or set the mask to select points to be used in\n loss reduction. In general, surfaces for which incidence angles can be\n reliably estimated should be selected, typically planar regions.\n\n :param cloud:\n :param mask:\n :param offset: Offset point-wise loss values, optional.\n :param sqrt: Whether to use square root of eigenvalue.\n :param normalization: Whether to normalize minimum eigenvalue by total variance.\n :param reduction:\n :return:\n \"\"\"\n # If a batch of clouds is (as a list), process them separately,\n # and reduce point-wise loss in the end by delegating to batch_loss.\n if isinstance(cloud, (list, tuple)):\n return batch_loss(min_eigval_loss, cloud, masks=mask, offsets=offset, sqrt=sqrt, normalization=normalization,\n reduction=reduction,\n inlier_max_loss=inlier_max_loss, inlier_ratio=inlier_ratio, inlier_loss_mult=inlier_loss_mult,\n only_finite=only_finite, skip_nans=skip_nans)\n\n assert isinstance(cloud, (DepthCloud, PointCloud))\n assert cloud.eigvals is not None\n assert offset is None or isinstance(offset, (DepthCloud, PointCloud))\n\n if mask is not None:\n print('Using %.3f valid entries from input cloud.' % mask.float().mean())\n cloud = cloud[mask]\n mask = None\n\n eigvals = cloud.eigvals\n loss = eigvals[:, 0]\n\n if normalization:\n loss = loss / eigvals.sum(dim=-1).clamp(min=1e-6)\n\n if inlier_ratio < 1.0:\n assert offset is None\n # Sort loss values and select inlier_ratio of them.\n loss_quantile = torch.quantile(loss, inlier_ratio, dim=0)\n print('Loss %.3g-quantile: %.3g.' % (inlier_ratio, loss_quantile.item()))\n if inlier_loss_mult != 1.0:\n loss_quantile = inlier_loss_mult * loss_quantile\n print('Multiplied %.3g-quantile: %.3g.' 
% (inlier_ratio, loss_quantile.item()))\n if inlier_max_loss is None:\n inlier_max_loss = loss_quantile\n else:\n inlier_max_loss = torch.min(inlier_max_loss, loss_quantile)\n\n if inlier_max_loss is not None:\n assert offset is None\n mask = (loss <= inlier_max_loss)\n print('Using %i (%.3g) inliers with loss <= %.3g.'\n % (mask.sum().item(), mask.float().mean().item(), inlier_max_loss.item()))\n\n if mask is not None:\n cloud = cloud[mask]\n loss = loss[mask]\n\n # Offset the loss using loss computed on local clouds.\n if offset is not None:\n loss = loss - offset\n\n # Ensure positive loss.\n loss = torch.relu(loss)\n\n if sqrt:\n loss = torch.sqrt(loss)\n\n cloud = cloud.copy()\n cloud.loss = loss\n\n loss = reduce(loss, reduction=reduction,\n only_finite=only_finite, skip_nans=skip_nans)\n return loss, cloud\n\n\ndef trace_loss(cloud, mask=None, offset=None, sqrt=None, reduction=Reduction.MEAN,\n inlier_max_loss=None, inlier_ratio=1.0, inlier_loss_mult=1.0,\n only_finite=False, skip_nans=False,\n **kwargs):\n \"\"\"Map consistency loss based on the trace of covariance matrix.\n\n Pre-filter cloud before, or set the mask to select points to be used in\n loss reduction. In general, surfaces for which incidence angles can be\n reliably estimated should be selected, typically planar regions.\n\n :param cloud:\n :param mask:\n :param offset: Source cloud to offset point-wise loss values, optional.\n :param sqrt: Whether to use square root of trace.\n :param reduction:\n :return:\n \"\"\"\n # If a batch of clouds is (as a list), process them separately,\n # and reduce point-wise loss in the end by delegating to batch_loss.\n if isinstance(cloud, (list, tuple)):\n return batch_loss(trace_loss, cloud, masks=mask, offsets=offset, sqrt=sqrt, reduction=reduction,\n inlier_max_loss=inlier_max_loss, inlier_ratio=inlier_ratio, inlier_loss_mult=inlier_loss_mult,\n only_finite=only_finite, skip_nans=skip_nans)\n\n assert isinstance(cloud, (DepthCloud, PointCloud))\n assert cloud.cov is not None\n assert offset is None or isinstance(offset, (DepthCloud, PointCloud))\n\n if mask is not None:\n print('Using %.3f valid entries from input cloud.' % mask.float().mean())\n cloud = cloud[mask]\n mask = None\n\n cov = cloud.cov\n loss = trace(cov)\n\n if inlier_ratio < 1.0:\n assert offset is None\n # Sort loss values and select inlier_ratio of them.\n loss_quantile = torch.quantile(loss, inlier_ratio, dim=0)\n print('Loss %.3g-quantile: %.3g.' % (inlier_ratio, loss_quantile.item()))\n if inlier_loss_mult != 1.0:\n loss_quantile = inlier_loss_mult * loss_quantile\n print('Multiplied %.3g-quantile: %.3g.' 
% (inlier_ratio, loss_quantile.item()))\n if inlier_max_loss is None:\n inlier_max_loss = loss_quantile\n else:\n inlier_max_loss = torch.min(inlier_max_loss, loss_quantile)\n\n if inlier_max_loss is not None:\n assert offset is None\n mask = (loss <= inlier_max_loss)\n print('Using %i (%.3g) inliers with loss <= %.3g.'\n % (mask.sum().item(), mask.float().mean().item(), inlier_max_loss.item()))\n\n if mask is not None:\n cloud = cloud[mask]\n loss = loss[mask]\n\n # Offset the loss using loss computed on local clouds.\n if offset is not None:\n loss = loss - offset\n\n # Ensure positive loss.\n loss = torch.relu(loss)\n\n if sqrt:\n loss = torch.sqrt(loss)\n\n cloud = cloud.copy()\n cloud.loss = loss\n\n loss = reduce(loss, reduction=reduction, only_finite=only_finite, skip_nans=skip_nans)\n return loss, cloud\n\n\ndef icp_loss(clouds, poses=None, model=None, masks=None, **kwargs):\n \"\"\"ICP-like point to plane loss.\n\n :param clouds: List of lists of clouds :) Individual scans from different data sequences.\n :param poses: List od lists of poses for each point cloud scan.\n :param masks:\n :return:\n \"\"\"\n transformed_clouds = clouds\n if model is not None:\n transformed_clouds = [[model(c) for c in seq_clouds] for seq_clouds in transformed_clouds]\n if poses is not None:\n transformed_clouds = [[c.transform(p) for c, p in zip(seq_clouds, seq_poses)]\n for seq_clouds, seq_poses in zip(transformed_clouds, poses)]\n loss = 0.\n loss_cloud = []\n loss_fun = point_to_plane_dist if kwargs['icp_point_to_plane'] else point_to_point_dist\n\n for i in range(len(transformed_clouds)):\n seq_trans_clouds = transformed_clouds[i]\n seq_masks = None if masks is None else masks[i]\n loss_seq = loss_fun(seq_trans_clouds, masks=seq_masks, **kwargs)\n loss = loss + loss_seq\n\n cloud = DepthCloud.concatenate(seq_trans_clouds)\n cloud.loss = loss\n loss_cloud.append(cloud)\n\n loss = loss / len(transformed_clouds)\n\n return loss, loss_cloud\n\n\ndef point_to_plane_dist(clouds: list, icp_inlier_ratio=0.5, masks=None, differentiable=True, verbose=False, **kwargs):\n \"\"\"ICP-like point to plane distance.\n\n Computes point to plane distances for consecutive pairs of point cloud scans, and returns the average value.\n\n :param clouds: List of clouds. 
Individual scans from a data sequence.\n    :param masks: List of tuples masks[i] = (mask1, mask2) where mask1 defines indices of points from 1st point cloud\n                  in a pair that intersect (close enough) with points from 2nd cloud in the pair,\n                  mask2 is the list of indices of intersection points from the 2nd point cloud in a pair.\n    :param icp_inlier_ratio: Ratio of inlier points between two neighboring clouds.\n    :param differentiable: Whether to use a differentiable method of finding neighboring points (from Pytorch3d: slow on CPU)\n                       or the one from scipy (faster but not differentiable).\n    :param verbose:\n    :return:\n    \"\"\"\n    assert 0.0 <= icp_inlier_ratio <= 1.0\n    if masks is not None:\n        assert len(clouds) == len(masks) + 1\n        # print('Using precomputed intersection masks for point to plane loss')\n    point2plane_dist = 0.0\n    n_pairs = len(clouds) - 1\n    for i in range(n_pairs):\n        cloud1 = clouds[i]\n        assert cloud1.normals is not None, \"Cloud must have normals computed to estimate point to plane distance\"\n        cloud2 = clouds[i + 1]\n\n        points1 = cloud1.to_points() if cloud1.points is None else cloud1.points\n        points2 = cloud2.to_points() if cloud2.points is None else cloud2.points\n        assert not torch.all(torch.isnan(points1))\n        assert not torch.all(torch.isnan(points2))\n        points1 = torch.as_tensor(points1, dtype=torch.float)\n        points2 = torch.as_tensor(points2, dtype=torch.float)\n\n        # find intersections between neighboring point clouds (1 and 2)\n        if masks is None:\n            if not differentiable:\n                tree = cKDTree(points2)\n                dists, ids = tree.query(points1, k=1)\n            else:\n                dists, ids, _ = knn_points(points1[None], points2[None], K=1)\n                dists = torch.sqrt(dists).squeeze()\n                ids = ids.squeeze()\n            dists = torch.as_tensor(dists)\n            dist_th = torch.nanquantile(dists, icp_inlier_ratio)\n            mask1 = dists <= dist_th\n            mask2 = ids[mask1]\n            inl_err = dists[mask1].mean()\n        else:\n            mask1, mask2 = masks[i]\n            inl_err = torch.tensor(-1.0)\n\n        points1_inters = points1[mask1]\n        assert len(points1_inters) > 0, \"Point clouds do not intersect. Try to sample lidar scans more frequently\"\n        points2_inters = points2[mask2]\n\n        # point to plane distance 1 -> 2\n        normals1_inters = cloud1.normals[mask1]\n        # assert np.allclose(np.linalg.norm(normals1_inters, axis=1), np.ones(len(normals1_inters)))\n        k = torch.multiply(normals1_inters, points2_inters - points1_inters).sum(dim=-1, keepdims=True)\n        points2_plane = points2_inters - k * normals1_inters\n        dists_to_plane = torch.linalg.norm(points2_inters - points2_plane, dim=-1)\n        dist12 = dists_to_plane.mean()\n\n        # point to plane distance 2 -> 1\n        normals2_inters = cloud2.normals[mask2]\n        # assert np.allclose(np.linalg.norm(normals2_inters, axis=1), np.ones(len(normals2_inters)))\n        k = torch.multiply(normals2_inters, points1_inters - points2_inters).sum(dim=-1, keepdims=True)\n        points1_plane = points1_inters - k * normals2_inters\n        dists_to_plane = torch.linalg.norm(points1_inters - points1_plane, dim=-1)\n        dist21 = dists_to_plane.mean()\n\n        point2plane_dist += 0.5 * (dist12 + dist21)\n\n        if inl_err > 0.3:\n            warnings.warn('ICP inliers error is too big: %.3f (> 0.3) [m] for pairs (%i, %i)' % (inl_err, i, i + 1))\n\n        if verbose:\n            print('Mean point to plane distance: %.3f [m] for scans: (%i, %i), inliers error: %.6f' %\n                  (point2plane_dist.item(), i, i+1, inl_err.item()))\n\n    point2plane_dist = torch.as_tensor(point2plane_dist / n_pairs)\n\n    return point2plane_dist\n\n\ndef point_to_point_dist(clouds: list, icp_inlier_ratio=0.5, masks=None, differentiable=True, verbose=False, **kwargs):\n    \"\"\"ICP-like point to point distance.\n\n    Computes point to point distances for consecutive pairs of point cloud scans, and returns the average value.\n\n    :param clouds: List of clouds. Individual scans from a data sequence.\n    :param masks: List of tuples masks[i] = (mask1, mask2) where mask1 defines indices of points from 1st point cloud\n                  in a pair that intersect (close enough) with points from 2nd cloud in the pair,\n                  mask2 is the list of indices of intersection points from the 2nd point cloud in a pair.\n    :param icp_inlier_ratio: Ratio of inlier points between two neighboring clouds.\n    :param differentiable: Whether to use a differentiable method of finding neighboring points (from Pytorch3d: slow on CPU)\n                       or the one from scipy (faster but not differentiable).\n    :param verbose:\n    :return:\n    \"\"\"\n    assert 0.0 <= icp_inlier_ratio <= 1.0\n    if masks is not None:\n        assert len(clouds) == len(masks) + 1\n        # print('Using precomputed intersection masks for point to plane loss')\n    point2point_dist = 0.0\n    n_pairs = len(clouds) - 1\n    for i in range(n_pairs):\n        cloud1 = clouds[i]\n        cloud2 = clouds[i + 1]\n\n        if isinstance(cloud1, DepthCloud):\n            points1 = cloud1.to_points() if cloud1.points is None else cloud1.points\n        else:\n            points1 = cloud1\n        if isinstance(cloud2, DepthCloud):\n            points2 = cloud2.to_points() if cloud2.points is None else cloud2.points\n        else:\n            points2 = cloud2\n        points1 = torch.as_tensor(points1, dtype=torch.float)\n        points2 = torch.as_tensor(points2, dtype=torch.float)\n        assert not torch.all(torch.isnan(points1))\n        assert not torch.all(torch.isnan(points2))\n\n        # find intersections between neighboring point clouds (1 and 2)\n        if masks is None:\n            if not differentiable:\n                tree = cKDTree(points2)\n                dists, ids = tree.query(points1.detach(), k=1)\n            else:\n                dists, ids, _ = knn_points(points1[None], points2[None], K=1)\n                dists = torch.sqrt(dists).squeeze()\n                ids = ids.squeeze()\n            dists = torch.as_tensor(dists)\n            dist_th = torch.nanquantile(dists, icp_inlier_ratio)\n            mask1 = dists <= dist_th\n            mask2 = 
ids[mask1]\n inl_err = dists[mask1].mean()\n else:\n mask1, mask2 = masks[i]\n inl_err = torch.tensor(-1.0)\n\n points1_inters = points1[mask1]\n assert len(points1_inters) > 0, \"Point clouds do not intersect. Try to sample lidar scans more frequently\"\n points2_inters = points2[mask2]\n assert len(points2_inters) > 0, \"Point clouds do not intersect. Try to sample lidar scans more frequently\"\n\n # point to point distance\n vectors = points2_inters - points1_inters\n point2point_dist = torch.linalg.norm(vectors, dim=1).mean()\n\n if inl_err > 0.3:\n warnings.warn('ICP inliers error is too big: %.3f (> 0.3) [m] for pairs (%i, %i)' % (inl_err, i, i + 1))\n\n if verbose:\n print('Mean point to point distance: %.3f [m] for scans: (%i, %i), inliers error: %.6f' %\n (point2point_dist.item(), i, i+1, inl_err.item()))\n\n point2point_dist = torch.as_tensor(point2point_dist / n_pairs)\n\n return point2point_dist\n\n\ndef loss_by_name(name):\n assert name in ('min_eigval_loss', 'trace_loss', 'icp_loss')\n return globals()[name]\n\n\ndef create_loss(cfg: Config):\n loss = loss_by_name(cfg.loss)\n\n def loss_fun(*args, **kwargs):\n return loss(*args, **kwargs, **cfg.loss_kwargs)\n\n return loss_fun\n\n\ndef preprocess_cloud(cloud, min_depth=None, max_depth=None, grid_res=None, k=None, r=None):\n cloud = filter_depth(cloud, min=min_depth, max=max_depth, log=False)\n cloud = filter_grid(cloud, grid_res, keep='last')\n cloud.update_all(k=k, r=r)\n keep = filter_eigenvalue(cloud, 0, max=(grid_res / 5)**2, only_mask=True, log=False)\n keep = keep & filter_eigenvalue(cloud, 1, min=grid_res**2, only_mask=True, log=False)\n cloud = cloud[keep]\n cloud.update_all(k=k, r=r)\n return cloud\n\n\ndef dataset_to_cloud(ds, min_depth=None, max_depth=None, grid_res=None, k=None, r=None, device='cpu'):\n if isinstance(device, str):\n device = torch.device(device)\n clouds = []\n poses = []\n\n for cloud, pose in ds:\n cloud = DepthCloud.from_points(cloud)\n cloud.to(device)\n pose = torch.tensor(pose, device=device)\n cloud = preprocess_cloud(cloud, min_depth=min_depth, max_depth=max_depth, grid_res=grid_res, k=k, r=r)\n cloud = cloud.transform(pose)\n clouds.append(cloud)\n poses.append(pose)\n\n cloud = DepthCloud.concatenate(clouds)\n # cloud.visualize(colors='inc_angles')\n cloud.visualize(colors='z')\n cloud.update_all(k=k, r=r)\n return cloud\n\n\ndef l2_loss(dc1, dc2):\n assert dc1.points is not None\n assert dc2.points is not None\n assert len(dc1.points) == len(dc2.points)\n return torch.linalg.norm(dc1.points - dc2.points) / len(dc1.points)\n\n\ndef demo():\n from data.asl_laser import Dataset\n # ds = Dataset('apartment')\n ds = Dataset('eth')\n # ds = Dataset('gazebo_summer')\n # ds = Dataset('gazebo_winter')\n # ds = Dataset('stairs')\n # ds = ds[10:21:10]\n ds = ds[::5]\n\n min_depth = 1.0\n max_depth = 15.0\n grid_res = 0.05\n k = None\n r = 3 * grid_res\n device = torch.device('cpu')\n # device = torch.device('cuda')\n\n dc = dataset_to_cloud(ds, min_depth=min_depth, max_depth=max_depth, grid_res=grid_res, k=k, r=r,\n device=device)\n\n # Visualize incidence angle to plane distance.\n # TODO: Compare using plane fit for low incidence angle.\n depth = dc.depth.detach().numpy().ravel()\n inc = dc.inc_angles.detach().numpy().ravel()\n inv_cos = 1.0 / np.cos(inc)\n dist = (dc.normals * (dc.points - dc.mean)).sum(dim=1).detach().numpy().ravel()\n norm_dist = dist / depth\n\n # Fit models dependent on incidence angle\n def domain(model, n=100):\n if isinstance(model, Polynomial):\n return 
np.linspace(model.domain[0], model.domain[1], n)\n if isinstance(model, np.ndarray):\n return np.linspace(np.nanmin(model), np.nanmax(model), n)\n raise ValueError('Invalid domain input, only polynomial or data sample is supported.')\n\n def lims(x):\n return np.nanquantile(x, [0.001, 0.999])\n\n import matplotlib.pyplot as plt\n # figsize = 8.27, 8.27\n figsize = 6.4, 6.4\n\n def plot_fit(x, y, x_label='x', y_label='y', x_lims=None, y_lims=None):\n if x_lims is None:\n x_lims = lims(x)\n if y_lims is None:\n y_lims = lims(y)\n poly1 = Polynomial.fit(x, y, 1).convert()\n poly2 = Polynomial.fit(x, y, 2).convert()\n print('%s to %s (deg. 1 fit): %s' % (y_label, x_label, poly1))\n print('%s to %s (deg. 2 fit): %s' % (y_label, x_label, poly2))\n # xs = domain(poly1)\n xs = domain(x)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n ax.plot(x, y, '.', markersize=0.5, label='data')\n ax.plot(xs, poly1(xs), 'r-', linewidth=2, label='fit deg. 1')\n ax.plot(xs, poly2(xs), 'g--', linewidth=2, label='fit deg. 2')\n ax.set_xlim(x_lims)\n ax.set_ylim(y_lims)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.grid(True)\n ax.legend()\n fig.tight_layout()\n fig.show()\n # print(np.nanquantile(x, np.linspace(0.0, 1.0, 10)))\n # print(np.nanquantile(y, np.linspace(0.0, 1.0, 10)))\n\n plot_fit(inc, dist,\n 'Incidence Angle', 'Distance to Plane [m]')\n plot_fit(inc, norm_dist,\n 'Incidence Angle', 'Distance to Plane / Depth')\n plot_fit(inv_cos, norm_dist,\n '1 / Incidence Angle Cosine', 'Distance to Plane / Depth',\n x_lims=[1.0, 11.47])\n\n return\n\n # combined.filter_neighbors_normal_angle(np.radians(30.))\n eigval_bounds = (0.0, 0.05**2)\n # max_angle = None\n max_angle = np.radians(30.)\n loss, loss_dc = min_eigval_loss(combined, r=r, offset=True,\n eigenvalue_bounds=eigval_bounds,\n max_angle=max_angle)\n\n print('Loss: %.6g' % loss.item())\n loss_dc.visualize(colors='loss')\n\n\ndef test_eigh3():\n \n def rand_C3():\n x = torch.randn(3, 3)\n return x @ x.t()\n \n n = 2\n C = torch.stack([rand_C3() for _ in range(n)])\n\n eigvals_torch, eigvecs_torch = torch.linalg.eigh(C)\n # print('eigvals_torch:\\n', eigvals_torch)\n # print('eigvecs_torch:\\n', eigvecs_torch)\n \n eigvals, eigvecs = eigh3(C)\n # print('eigvals:\\n', eigvals)\n # print('eigvecs:\\n', eigvecs)\n\n assert torch.allclose(eigvals, eigvals_torch, atol=1e-6), \\\n (eigvals, eigvals_torch, eigvals - eigvals_torch)\n assert torch.all(torch.isclose(eigvecs, eigvecs_torch, atol=1e-5)\n | torch.isclose(-eigvecs, eigvecs_torch, atol=1e-5)), \\\n (eigvecs, eigvecs_torch, eigvecs - eigvecs_torch)\n\n\ndef pose_correction_demo():\n from data.fee_corridor import dataset_names, Dataset\n from depth_correction.preproc import filtered_cloud, local_feature_cloud\n from depth_correction.transform import matrix_to_xyz_axis_angle, xyz_axis_angle_to_matrix\n import open3d as o3d\n from matplotlib import pyplot as plt\n\n cfg = Config()\n cfg.grid_res = 0.3\n cfg.min_depth = 1.0\n cfg.max_depth = 25.0\n cfg.nn_r = 0.4\n cfg.device = 'cuda'\n cfg.lr = 0.02\n cfg.n_opt_iters = 1000\n cfg.loss_kwargs['icp_inliers_ratio'] = 0.8\n cfg.loss_kwargs['icp_point_to_plane'] = False\n\n ds = Dataset(name=dataset_names[0], static_poses=False)\n id = int(np.random.choice(range(len(ds) - 1)))\n print('Using a pair of scans (%i, %i) from sequence: %s' % (id, id+1, dataset_names[0]))\n points1, pose1 = ds[id]\n points2, pose2 = ds[id + 1]\n # points2, pose2 = ds[id]\n\n cloud1 = DepthCloud.from_structured_array(points1)\n cloud2 = 
DepthCloud.from_structured_array(points2)\n\n cloud1 = filtered_cloud(cloud1, cfg)\n cloud2 = filtered_cloud(cloud2, cfg)\n\n pose1 = torch.tensor(pose1, dtype=torch.float32)\n pose2 = torch.tensor(pose2, dtype=torch.float32)\n xyza1 = torch.tensor(matrix_to_xyz_axis_angle(pose1[None]), dtype=torch.float32).squeeze()\n\n # xyza1_delta = torch.tensor([-0.01, 0.01, 0.02, 0.01, 0.01, -0.02], dtype=pose1.dtype)\n xyza1_delta = torch.tensor([0.5, 0.3, 0.2, 0.01, 0.01, -0.02], dtype=pose1.dtype)\n xyza1_delta.requires_grad = True\n\n optimizer = torch.optim.Adam([{'params': xyza1_delta, 'lr': cfg.lr}])\n\n cloud2 = cloud2.transform(pose2)\n cloud2.update_points()\n\n # compute cloud features necessary for optimization (like normals and incidence angles\n cloud1 = local_feature_cloud(cloud1, cfg)\n cloud2 = local_feature_cloud(cloud2, cfg)\n\n cloud1 = cloud1[cloud1.mask]\n cloud2 = cloud2[cloud2.mask]\n\n pcd2 = o3d.geometry.PointCloud()\n pcd2.points = o3d.utility.Vector3dVector(cloud2.points.detach())\n pcd2.colors = o3d.utility.Vector3dVector(torch.zeros_like(cloud2.points.detach()) + torch.tensor([0, 0, 1]))\n\n plt.figure(figsize=(20, 5))\n losses = []\n iters = []\n xyza_deltas = []\n # run optimization loop\n for it in range(cfg.n_opt_iters):\n # add noise to poses\n pose_deltas_mat = xyz_axis_angle_to_matrix(xyza1_delta[None]).squeeze()\n pose1_corr = torch.matmul(pose1, pose_deltas_mat)\n\n # transform point clouds to the same world coordinate frame\n cloud1_corr = cloud1.transform(pose1_corr)\n cloud1_corr.update_points()\n\n train_clouds = [cloud1_corr, cloud2]\n\n loss, _ = icp_loss([train_clouds], **cfg.loss_kwargs, verbose=True)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n print('At iter %i ICP loss: %f' % (it, loss.item()))\n\n iters.append(it)\n losses.append(loss.item())\n xyza_deltas.append(xyza1_delta.clone())\n\n plt.cla()\n plt.subplot(1, 3, 1)\n plt.ylabel('ICP point to %s loss' % ('plane' if cfg.loss_kwargs['icp_point_to_plane'] else 'point'))\n plt.xlabel('Iterations')\n plt.plot(iters, losses, color='k')\n plt.grid(visible=True)\n\n plt.subplot(1, 3, 2)\n plt.ylabel('L2 pose distance')\n plt.xlabel('Iterations')\n plt.plot(iters, torch.stack(xyza_deltas, dim=0).detach()[:, 0], color='r', label='dx')\n plt.plot(iters, torch.stack(xyza_deltas, dim=0).detach()[:, 1], color='g', label='dy')\n plt.plot(iters, torch.stack(xyza_deltas, dim=0).detach()[:, 2], color='b', label='dz')\n plt.grid(visible=True)\n\n plt.subplot(1, 3, 3)\n plt.ylabel('L2 orient distance')\n plt.xlabel('Iterations')\n plt.plot(iters, torch.linalg.norm(torch.stack(xyza_deltas, dim=0).detach()[:, 3:], dim=1), label='da')\n plt.grid(visible=True)\n\n plt.pause(0.01)\n plt.draw()\n\n if True and it % 200 == 0:\n print('Distance between clouds: %f', (torch.linalg.norm(pose1[:3, 3] - pose2[:3, 3])))\n print('Changed pose of the first cloud by: %s [m]' % torch.linalg.norm(xyza1_delta[:3]).detach())\n\n pcd1 = o3d.geometry.PointCloud()\n pcd1.points = o3d.utility.Vector3dVector(cloud1_corr.points.detach())\n pcd1.colors = o3d.utility.Vector3dVector(torch.zeros_like(cloud1_corr.points.detach()) +\n torch.tensor([1, 0, 0]))\n pcd1.normals = o3d.utility.Vector3dVector(cloud1_corr.normals.detach())\n\n o3d.visualization.draw_geometries([pcd1, pcd2], point_show_normal=cfg.loss_kwargs['icp_point_to_plane'])\n plt.show()\n\n\ndef test():\n test_eigh3()\n\n\ndef main():\n # test()\n # demo()\n pose_correction_demo()\n\n\nif __name__ == '__main__':\n 
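# main() currently runs pose_correction_demo(); test() and demo() above are alternative entry points that can be re-enabled.\n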
main()\n","repo_name":"ctu-vras/depth_correction","sub_path":"src/depth_correction/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":32572,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"7177715641","text":"from PIL import Image\nimport numpy as np\nimport sys\n\ndef toEnglish(asciiList):\n\tmessage = ''\n\tfor eachAscii in asciiList:\n\t\talphabet = chr(int(eachAscii,2))\n\t\tmessage += alphabet\n\t\t\n\treturn message\n\n\n\ndef main():\n\tif(len(sys.argv) == 1):\n\t\timgPath = input(\"Enter path of Image :\\n> \")\n\telse:\n\t\timgPath = sys.argv[1]\n\n\ttry:\n\t\timg = Image.open(imgPath)\n\texcept Exception as e:\n\t\tprint(e)\n\t\tprint(\"Try Again..\")\n\t\tmain()\n\tprint(\"Decoding..\\n\")\n\t\n\tarr = np.array(img)\n\n\tpix = ''\n\tlistOfAscii = []\n\tpixOver = 0\n\tfinish = False\n\n\tfor row in arr:\n\t\tfor column in row:\n\t\t\tbinPix = \"{0:08b}\".format(column)\n\t\t\tif (pixOver == 4):\n\t\t\t\tif(pix == '01100000'): #terminator\n\t\t\t\t\tfinish = True\n\t\t\t\t\tbreak\n\t\t\t\tlistOfAscii.append(pix)\n\t\t\t\tpix = ''\n\t\t\t\tpixOver = 0\n\t\t\tbinPix = \"{0:08b}\".format(column)\n\t\t\tpixEnding = binPix[-2:]\n\t\t\tpix += pixEnding\n\t\t\tpixOver += 1\n\n\t\tif(finish):\n\t\t\tbreak\n\t\n\n\tmsg = toEnglish(listOfAscii)\n\tprint(\"Message :\")\n\tmsg = msg.replace(\"\\\\n\",\"\\n\")\n\tprint(msg)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"pseudoidris/steg","sub_path":"decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34327977275","text":"from permuta import *\nfrom permuta.bisc import *\n\n\ndef min_index(L):\n mi = 0\n for i in range(len(L)):\n if L[i] < L[mi]:\n mi = i\n return mi\n\n\ndef fix_cycle(L):\n mi = min_index(L)\n return L[mi:] + L[:mi]\n\n\ndef Foata(perm):\n \"\"\"\n given a permutation perm, outputs the image under Foata's fundamental bijection\n \"\"\"\n cd = perm.cycle_decomp()\n cd = sorted(map(fix_cycle, cd), reverse=True)\n return Perm(tuple(sum(cd, [])))\n\n\ndef inverse_Foata(perm):\n \"\"\"\n given a permutation perm, outputs the preimage under Foata's fundamental bijection\n \"\"\"\n if len(perm) == 0:\n return perm\n ltrmins = list(perm.ltrmin())\n seqs = []\n for i in range(len(ltrmins) - 1):\n seqs.append(list(perm)[ltrmins[i] : ltrmins[i + 1]])\n seqs.append(list(perm)[ltrmins[-1] :])\n new_perm = [0] * len(perm)\n for i in range(len(seqs)):\n seq = seqs[i]\n if len(seq) == 1:\n new_perm[seq[0]] = seq[0]\n else:\n for j in range(len(seq)):\n new_perm[seq[j]] = seq[(j + 1) % len(seq)]\n return Perm(tuple(new_perm))\n\n\n# p = Perm((2, 4, 3, 0, 1))\n# p = Perm((0,))\n# print(p)\n# print(Foata(p))\n# print(inverse_Foata(p))\n# print(Foata(inverse_Foata(p)) == p)\n# print(inverse_Foata(Foata(p)) == p)\n\n\ndef cycle_is_increasing(c):\n fc = fix_cycle(c)\n for j in range(len(fc) - 1):\n if fc[j + 1] - fc[j] != 1:\n return False\n return True\n\n\ndef has_increasing_cycle(perm, i):\n cd = perm.cycle_decomp()\n for c in cd:\n if len(c) == i:\n if cycle_is_increasing(c):\n return True\n return False\n\n\ndef num_increasing_cycles(perm, i):\n cd = perm.cycle_decomp()\n count = 0\n for c in cd:\n if len(c) == i:\n if cycle_is_increasing(c):\n count += 1\n return count\n\n\ndef cycle_is_sorted_increasing(c):\n fc = sorted(fix_cycle(c))\n for j in range(len(fc) - 1):\n if fc[j + 1] - fc[j] != 1:\n return False\n return 
True\n\n\ndef has_increasing_sorted_cycle(perm, i):\n cd = perm.cycle_decomp()\n for c in cd:\n if len(c) == i:\n if cycle_is_sorted_increasing(c):\n return True\n return False\n\n\nq = 4\n# auto_bisc(lambda perm: not has_increasing_cycle(inverse_Foata(perm), q))\n# auto_bisc(lambda perm: not has_increasing_sorted_cycle(inverse_Foata(perm), q))\n\nD = {\n 4: {\n Perm((0, 1, 2, 3)): [\n {\n (0, 1),\n (2, 4),\n (1, 2),\n (3, 4),\n (2, 1),\n (4, 3),\n (3, 1),\n (0, 2),\n (2, 2),\n (1, 0),\n (3, 2),\n (1, 3),\n (4, 1),\n (4, 4),\n (0, 0),\n (1, 1),\n (0, 3),\n (2, 0),\n (4, 2),\n (1, 4),\n (2, 3),\n (3, 0),\n (3, 3),\n (4, 0), # added\n }\n ]\n },\n 5: {\n Perm((1, 2, 3, 4, 0)): [\n {\n (0, 1),\n (2, 4),\n (1, 2),\n (0, 4),\n (3, 4),\n (2, 1),\n (4, 3),\n (1, 5),\n (3, 1),\n (5, 4),\n (0, 2),\n (2, 2),\n (1, 0),\n (3, 2),\n (2, 5),\n (1, 3),\n (3, 5),\n (5, 2),\n (4, 4),\n (0, 0),\n (1, 1),\n (0, 3),\n (2, 0),\n (4, 2),\n (1, 4),\n (2, 3),\n (3, 0),\n (4, 5),\n (3, 3),\n (5, 3),\n (4, 1), # added\n (4, 0), # added\n }\n ]\n },\n}\nclpatt1 = list(D[4].keys())[0]\nmesh1 = D[4][clpatt1][0]\nclpatt2 = list(D[5].keys())[0]\nmesh2 = D[5][clpatt2][0]\n\nmp1 = MeshPatt(clpatt1, mesh1)\nmp2 = MeshPatt(clpatt2, mesh2)\n\nprint(mp1.ascii_plot())\nprint(\"------------------\")\nprint(mp2.ascii_plot())\n\nfor i in range(1, 10):\n for perm in Perm.of_length(i):\n numic = num_increasing_cycles(perm, q)\n Foata_of_perm = Foata(perm)\n nummp = sum(1 for _ in Foata_of_perm.occurrences_of(mp1)) + sum(\n 1 for _ in Foata_of_perm.occurrences_of(mp2)\n )\n if numic != nummp:\n print(perm, numic)\n print(Foata_of_perm, nummp)\n","repo_name":"akc/a-curious-mesh-pattern","sub_path":"bisc_adjacent.py","file_name":"bisc_adjacent.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41892789812","text":"from pyspark.context import SparkContext\nfrom pyspark.sql.session import SparkSession\n\nfrom pyspark.sql.functions import when\n\nsc = SparkContext(\"local\")\nspark = SparkSession(sc)\n\nproject_id = \"databootcamp-test1\"\nfile = f\"gs://{project_id}-raw-layer/data/movie_review.csv\"\nsaveTo = f\"gs://{project_id}-staging-data-layer/movie_review\"\nlines = spark.read.option(\"header\", True).csv(file)\n\n\nreviews = lines.withColumn(\n \"positive_review\", when(lines.review_str.contains(\"good\"), 1).otherwise(0)\n)\nreviews = reviews.withColumnRenamed(\"cid\", \"user_id\").withColumnRenamed(\n \"id_review\", \"review_id\"\n)\n\n\nreviews[\n [\n \"user_id\",\n \"review_id\",\n \"positive_review\",\n ]\n].write.option(\"header\", True).csv(saveTo)\n","repo_name":"hoftherose/databootcamp-airflow-dags","sub_path":"spark/review_etl.py","file_name":"review_etl.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20252210470","text":"from collections.abc import Collection\nfrom inspect import getmembers\nfrom itertools import starmap\nfrom typing import Any\n\nfrom graphql import print_schema\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import as_declarative\n\nfrom apischema import Undefined, deserialize, serialize\nfrom apischema.graphql import graphql_schema\nfrom apischema.json_schema import deserialization_schema\nfrom apischema.objects import ObjectField, set_object_fields\n\n\ndef column_field(name: str, column: Column) -> ObjectField:\n required = False\n default: Any = ...\n if 
column.default is not None:\n        default = column.default\n    elif column.server_default is not None:\n        default = Undefined\n    elif column.nullable:\n        default = None\n    else:\n        required = True\n    col_type = column.type.python_type\n    if column.nullable:\n        col_type = col_type | None\n    return ObjectField(column.name or name, col_type, required, default=default)\n\n\n# Very basic SQLAlchemy support\n@as_declarative()\nclass Base:\n    def __init_subclass__(cls, **kwargs):\n        super().__init_subclass__(**kwargs)\n        columns = getmembers(cls, lambda m: isinstance(m, Column))\n        if not columns:\n            return\n        set_object_fields(cls, starmap(column_field, columns))\n\n\nclass Foo(Base):\n    __tablename__ = \"foo\"\n    bar = Column(Integer, primary_key=True)\n    baz = Column(String)\n\n\nfoo = deserialize(Foo, {\"bar\": 0})\nassert isinstance(foo, Foo)\nassert foo.bar == 0\nassert serialize(Foo, foo) == {\"bar\": 0, \"baz\": None}\nassert deserialization_schema(Foo) == {\n    \"$schema\": \"http://json-schema.org/draft/2020-12/schema#\",\n    \"type\": \"object\",\n    \"properties\": {\n        \"bar\": {\"type\": \"integer\"},\n        \"baz\": {\"type\": [\"string\", \"null\"], \"default\": None},\n    },\n    \"required\": [\"bar\"],\n    \"additionalProperties\": False,\n}\n\n\ndef foos() -> Collection[Foo] | None:\n    ...\n\n\nschema = graphql_schema(query=[foos])\nschema_str = \"\"\"\\\ntype Query {\n  foos: [Foo!]\n}\n\ntype Foo {\n  bar: Int!\n  baz: String\n}\"\"\"\nassert print_schema(schema) == schema_str\n","repo_name":"wyfo/apischema","sub_path":"examples/examples/sqlalchemy_support.py","file_name":"sqlalchemy_support.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"81"}
+{"seq_id":"15882292023","text":"## A class that describes a shop\r\nfrom items import items\r\n\r\ndef getSumm(itemsList):\r\n    s = 0\r\n    for item in itemsList:\r\n        s+=item.price\r\n    return s\r\n\r\nclass Item():\r\n    def __init__(self,title,color,price,sale,id):\r\n        self.title = title\r\n        self.color = color\r\n        self.price = price\r\n        self.sale = sale\r\n        self.id = id\r\n\r\nclass Shop():\r\n    def __init__(self,catalog):\r\n        self.title = 'GreenTutrle'\r\n        self.profOrientaion = ['tech','furniture']\r\n        self.balans = 9999\r\n        self.catalog = catalog\r\n        self.historty = []\r\n\r\n    ## purchase payment method\r\n    def dealCompleted(self,deal):\r\n        self.balans += 0.9 * deal.price\r\n        self.historty.append(deal)\r\n        print('Payment completed. Balance {0}'.format(self.balans))\r\n\r\nclass Deal():\r\n    def __init__(self,itemsList,time,bonusCode):\r\n\r\n        self.itemsList = itemsList\r\n        self.time = time\r\n        self.bonusCode = bonusCode\r\n        self.price = getSumm(itemsList)\r\n\r\n\r\n\r\nmenu = ['1 - catalog','2 - basket  3 - close' ]\r\nmenu2 = ['0 - go back, item id - open that item']\r\nmenu3 = ['0 - back to catalog  1 - add to basket']\r\nbusket = []\r\nopen=True\r\nwhile open:\r\n    ## go through the items list and print every product in turn\r\n    print(menu)\r\n    action = int(input('Where to go? '))\r\n    if action == 1:\r\n        for item in items:\r\n            print(item)\r\n        print(menu2)\r\n        a = int(input('Where? '))\r\n        if a==0:\r\n            print('returning to the main menu')\r\n            continue\r\n        else:\r\n            print(items[a-1])\r\n            print(menu3)\r\n            item_id = a  # remember which item was opened, so that this one is added to the basket\r\n            a = int(input('Where? '))\r\n            if a==1:\r\n                busket.append(items[item_id-1])\r\n                print('item successfully added to the basket')\r\n                continue\r\n\r\n    if action ==2:\r\n        print('!!!YOUR BASKET LOOKS LIKE THIS!!!')\r\n        print(busket)\r\n    if action==3:\r\n        break\r\n\r\ns=0\r\nfor item in busket:\r\n    s+=item.get('price')\r\n\r\nprint('Your purchase total is {0}'.format(s))\r\n## create a shop object\r\nmagazine = Shop([])\r\n\r\nlays = Item('lays','pink',100,0,0)\r\ncomputar1 = Item('PC','white',789,0,1)\r\nsmarttv = Item ('smartTv','black',1999,0,2)\r\nlaptop1 = Item('Mac Book','white',1500,0,3)\r\nlaptop2 = Item('HP','color',720,0,4)\r\nredbull = Item('red bull','blue',2,0,5)\r\nredbullPack = Item('redBull pack','blue',11,0,6)\r\n\r\n\r\n\r\n\r\n","repo_name":"Lisickyi404/daniil","sub_path":"shopshop.py","file_name":"shopshop.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"31262520438","text":"# Libraries\nimport streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport io\nimport seaborn as sns\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom wordcloud import wordcloud\nimport streamlit.components.v1 as components\nfrom tempfile import NamedTemporaryFile\n\n# Main Function\ndef main():\n    st.sidebar.title(\"GraphGia - Data Cleaning & Exploration Tool\")\n    st.sidebar.write(\n        \"GraphGia is a tool for Data Cleaning, Visualization, and Exploratory Data Analysis.\"\n    )\n    st.sidebar.write(\"🫶\")\n    app_mode = st.sidebar.selectbox(\n        \"Choose the app mode\", [\"GraphGia\", \"EDA Dashboard\"]\n    )\n\n    if app_mode == \"GraphGia\":\n        graphgia()\n    elif app_mode == \"EDA Dashboard\":\n        eda_dashboard()\n\n\n\n# Data Cleaning Function\ndef clean_data(data):\n    st.subheader(\"Data Cleaning\")\n\n    # Remove Null Values\n    data = data.dropna()\n\n    # Remove Duplicate Values\n    data = data.drop_duplicates()\n\n    # Label Encoding for Categorical Columns\n    categorical_columns = data.select_dtypes(include=[\"object\"]).columns\n    label_encoder = LabelEncoder()\n    for col in categorical_columns:\n        data[col] = label_encoder.fit_transform(data[col])\n\n    st.write(\"Null and Duplicate Values Removed.\")\n    st.write(\"Data Cleaned Successfully!\")\n    return data\n\n# Data Export Function\ndef export_data(data, file_format, encoded=False):\n    if encoded:\n        if file_format == \"CSV\":\n            # Export to CSV\n            csv_file = data.to_csv(index=False)\n            st.download_button(\n                label=\"Download Clean Encoded CSV\",\n                data=csv_file,\n                file_name=\"encoded_data.csv\",\n                mime=\"text/csv\",\n            )\n        else:\n            st.write(\"An error occurred\")\n    else:\n        if file_format == \"CSV\":\n            # Export to CSV\n            csv_file = 
data.to_csv(index=False)\n st.download_button(\n label=\"Download CSV\",\n data=csv_file,\n file_name=\"exported_data.csv\",\n mime=\"text/csv\",\n )\n else:\n st.write(\"An error occurred\")\n\n# Function for Ordinal Encoding of Yes/No Columns\ndef ordinal_encode_yes_no(data, column_name):\n data[column_name] = data[column_name].map({\"No\": 0, \"Yes\": 1})\n return data\n\n# GraphGia\ndef graphgia():\n st.set_option(\"deprecation.showPyplotGlobalUse\", False)\n st.title(\"GraphGia - Data Cleaning & Exploration Tool\")\n st.write(\"This is a data cleaning & exploration tool.\")\n\n # File Uploader Widget\n uploaded_file = st.file_uploader(\"Upload a CSV or Excel file\", type=[\"csv\", \"xlsx\"])\n\n if uploaded_file is not None:\n if uploaded_file.name.endswith(\"csv\"):\n data = pd.read_csv(uploaded_file)\n elif uploaded_file.name.endswith(\"xlsx\"):\n data = pd.read_excel(uploaded_file, engine=\"openpyxl\")\n else:\n st.error(\"Unsupported file format. Please upload a CSV or Excel file.\")\n\n st.write(\"Uploaded Data:\", data)\n\n # Additional Information Button - Data Description\n if st.button(\"Show Extended Dataset Information\"):\n st.subheader(\"Dataset Description\")\n description = data.describe()\n st.write(description)\n\n # Cleaning Section\n st.subheader(\"Data Cleaning\")\n if st.button(\"Clean Data\"):\n data = clean_data(data)\n\n # Encoding Section\n st.subheader(\"Encoding Section\")\n selected_column = st.selectbox(\"Select a column to encode:\", data.columns)\n encode_method = st.radio(\"Select encoding method:\", [\"Label Encoding\", \"One-Hot Encoding\", \"Ordinal Encoding\"])\n\n if st.button(\"Encode Column\"):\n if encode_method == \"Label Encoding\":\n label_encoder = LabelEncoder()\n data[selected_column] = label_encoder.fit_transform(data[selected_column])\n st.write(f'{selected_column} Encoded Successfully using Label Encoding')\n elif encode_method == \"One-Hot Encoding\":\n data = pd.get_dummies(data, columns=[selected_column])\n st.write(f'{selected_column} Encoded Successfully using One-Hot Encoding')\n elif encode_method == \"Ordinal Encoding\":\n data = ordinal_encode_yes_no(data, selected_column)\n st.write(f'{selected_column} Encoded Successfully using Ordinal Encoding (Yes/No to 0/1)')\n\n # Data Export Section\n if st.button(\"Export Cleaned & Encoded Data\"):\n st.subheader(\"Data Export\")\n export_format = st.radio(\"Select export format:\", [\"CSV\"])\n export_data(data, export_format, encoded=True)\n\n\n# EDA Dashboard\ndef eda_dashboard():\n st.title(\"EDA Dashboard\")\n st.write(\"This is an exploratory data analysis dashboard. 
Upload your datasets and visualize your data interactively!\")\n\n uploaded_file = st.file_uploader(\"Choose a CSV file\", type=[\"csv\"])\n\n if uploaded_file is not None:\n df = pd.read_csv(uploaded_file)\n\n st.write(\"Dataset Statistics:\")\n st.write(df.describe())\n\n # Checkbox for user-selected visualizations\n st.sidebar.title(\"Select Visualizations\")\n histogram = st.sidebar.checkbox(\"Histogram\")\n scatter_plot = st.sidebar.checkbox(\"Scatter Plot\")\n correlation_matrix = st.sidebar.checkbox(\"Correlation Matrix\")\n bar_chart = st.sidebar.checkbox(\"Bar Chart\")\n scatter_matrix = st.sidebar.checkbox(\"Scatter Matrix\")\n box_plot = st.sidebar.checkbox(\"Box Plot\")\n pair_plot = st.sidebar.checkbox(\"Pair Plot\")\n count_plot = st.sidebar.checkbox(\"Count Plot\")\n dist_plot = st.sidebar.checkbox(\"Distribution Plot\")\n pie_chart = st.sidebar.checkbox(\"Pie Chart\")\n time_series = st.sidebar.checkbox(\"Time Series Plot\")\n violin_plot = st.sidebar.checkbox(\"Violin Plot\")\n\n # Histogram\n if histogram:\n st.subheader(\"Histogram\")\n column = st.selectbox(\"Select a column for the histogram\", df.columns)\n plt.hist(df[column], bins=20, edgecolor=\"k\")\n st.pyplot()\n\n # Generated Histogram Code\n st.write(\"**Generated Histogram Code:**\")\n hist_code = f\"\"\"\n import matplotlib.pyplot as plt\n column = '{column}'\n plt.hist(df[column], bins=20, edgecolor='k')\n plt.xlabel('{column}')\n plt.ylabel('Frequency')\n plt.title('Histogram of {column}')\n plt.show()\n \"\"\"\n st.code(hist_code, language=\"python\")\n\n # Scatter Plot\n if scatter_plot:\n st.subheader(\"Scatter Plot\")\n x_column = st.selectbox(\"Select X-axis column\", df.columns)\n y_column = st.selectbox(\"Select Y-axis column\", df.columns)\n plt.scatter(df[x_column], df[y_column])\n plt.xlabel(x_column)\n plt.ylabel(y_column)\n st.pyplot()\n\n # Generated Scatter Plot Code\n st.write(\"**Generated Scatter Plot Code:**\")\n scatter_code = f\"\"\"\n import matplotlib.pyplot as plt\n x_column = '{x_column}'\n y_column = '{y_column}'\n plt.scatter(df[x_column], df[y_column])\n plt.xlabel('{x_column}')\n plt.ylabel('{y_column}')\n plt.title('Scatter Plot: {x_column} vs {y_column}')\n plt.show()\n \"\"\"\n st.code(scatter_code, language=\"python\")\n\n # Correlation Matrix\n if correlation_matrix:\n st.subheader(\"Correlation Matrix\")\n corr_matrix = df.corr()\n plt.figure(figsize=(10, 8))\n sns.heatmap(corr_matrix, annot=True, cmap=\"coolwarm\", center=0)\n st.pyplot()\n\n # Generated Correlation Matrix Code\n st.write(\"**Generated Correlation Matrix Code**\")\n corr_code = f\"\"\"\n import seaborn as sns\n import matplotlib.pyplot as plt\n corr_matrix = df.corr()\n plt.figure(figsize=(10, 8))\n sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', center=0)\n plt.title('Correlation Matrix')\n plt.show()\n \"\"\"\n st.code(corr_code, language=\"python\")\n\n # Bar Chart\n if bar_chart:\n st.subheader(\"Bar Chart\")\n bar_column = st.selectbox(\"Select a column for the bar chart\", df.columns)\n bar_chart = px.bar(df, x=bar_column)\n st.plotly_chart(bar_chart, use_container_width=True)\n\n # Generated Bar Chart Code\n st.write(\"**Generated Bar Chart Code**\")\n bar_code = f\"\"\"\n import plotly.express as px\n bar_column = '{bar_column}'\n bar_chart = px.bar(df, x=bar_column)\n bar_chart.show()\n \"\"\"\n st.code(bar_code, language=\"python\")\n\n # Scatter Matrix\n if scatter_matrix:\n st.subheader(\"Scatter Matrix Plot\")\n scatter_matrix = px.scatter_matrix(\n df, dimensions=df.columns, 
title=\"Scatter Matrix\"\n )\n st.plotly_chart(scatter_matrix, use_container_width=True)\n\n # Generated Scatter Matrix Code\n st.write(\"**Generated Scatter Matrix Code**\")\n scatter_matrix_code = f\"\"\"\n import plotly.express as px\n scatter_matrix = px.scatter_matrix(df, dimensions=df.columns, title='Scatter Matrix')\n scatter_matrix.show()\n \"\"\"\n st.code(scatter_matrix_code, language=\"python\")\n \n # Box Plot\n if box_plot:\n box_column = st.selectbox(\"Select a column for the box plot\", df.columns)\n plt.boxplot(df[box_column])\n plt.xlabel(box_column)\n plt.ylabel(\"Value\")\n st.pyplot()\n \n # Generated Box Plot Code\n st.write(\"**Generated Box Plot Code**\")\n box_code = f\"\"\"\n box_column = '{box_column}'\n plt.boxplot(df[box_column])\n plt.xlabel('{box_column}')\n plt.ylabel('Value')\n plt.title('Box Plot: {box_column}')\n plt.show()\n \"\"\"\n st.code(box_code, language=\"python\")\n \n # Pair Plot\n if pair_plot:\n pair_plot = sns.pairplot(df)\n st.pyplot()\n\n # Generated Pair Plot Code\n st.write(\"**Generated Pair Plot Code**\")\n pair_plot_code = \"\"\"\n import seaborn as sns\n pair_plot = sns.pairplot(df)\n plt.show()\n \"\"\"\n st.code(pair_plot_code, language=\"python\")\n \n # Count Plot\n if count_plot:\n # Count Plot\n count_column = st.selectbox(\"Select a column for the count plot\", df.columns)\n count_plot = sns.countplot(data=df, x=count_column)\n st.pyplot()\n\n # Generated Count Plot Code\n st.write(\"**Generated Count Plot Code**\")\n count_plot_code = f\"\"\"\n count_column = '{count_column}'\n count_plot = sns.countplot(data=df, x='{count_column}')\n plt.show()\n \"\"\"\n st.code(count_plot_code, language=\"python\")\n \n # Distribution Plot\n if dist_plot:\n dist_column = st.selectbox(\"Select a column for the distribution plot\", df.columns)\n sns.histplot(df[dist_column], kde=True)\n plt.xlabel(dist_column)\n plt.ylabel(\"Density\")\n st.pyplot()\n\n # Generated Distribution Plot Code\n st.write(\"**Generated Distribution Plot Code**\")\n dist_plot_code = f\"\"\"\n dist_column = '{dist_column}'\n sns.histplot(df['{dist_column}'], kde=True)\n plt.xlabel('{dist_column}')\n plt.ylabel('Density')\n plt.title('Distribution Plot: {dist_column}')\n plt.show()\n \"\"\"\n st.code(dist_plot_code, language=\"python\")\n \n # Pie Chart\n if pie_chart:\n pie_column = st.selectbox(\"Select a categorical column for the pie chart\", df.columns)\n pie_data = df[pie_column].value_counts()\n plt.pie(pie_data, labels=pie_data.index, autopct=\"%1.1f%%\", startangle=140)\n plt.axis(\"equal\")\n st.pyplot()\n\n # Generated Pie Chart Code\n st.write(\"**Generated Pie Chart Code**\")\n pie_chart_code = f\"\"\"\n pie_column = '{pie_column}'\n pie_data = df['{pie_column}'].value_counts()\n plt.pie(pie_data, labels=pie_data.index, autopct=\"%1.1f%%\", startangle=140)\n plt.axis(\"equal\")\n plt.title('Pie Chart: {pie_column}')\n plt.show()\n \"\"\"\n st.code(pie_chart_code, language=\"python\")\n \n # Time Series Plot\n if time_series:\n time_column = st.selectbox(\"Select the time-based column\", df.columns)\n df[time_column] = pd.to_datetime(df[time_column])\n df.set_index(time_column, inplace=True)\n\n # Select Y-axis column for the time series plot\n y_column = st.selectbox(\"Select Y-axis column\", df.columns)\n\n plt.plot(df.index, df[y_column])\n plt.xlabel(\"Time\")\n plt.ylabel(y_column)\n st.pyplot()\n\n # Generated Time Series Plot Code\n st.write(\"**Generated Time Series Plot Code**\")\n time_series_code = f\"\"\"\n time_column = '{time_column}'\n 
df['{time_column}'] = pd.to_datetime(df['{time_column}'])\n df.set_index('{time_column}', inplace=True)\n y_column = '{y_column}'\n plt.plot(df.index, df['{y_column}'])\n plt.xlabel('Time')\n plt.ylabel('{y_column}')\n plt.title('Time Series Plot: {y_column} over Time')\n plt.show()\n \"\"\"\n st.code(time_series_code, language=\"python\")\n \n # Violin Plot\n if violin_plot:\n violin_x = st.selectbox(\"Select a categorical column for X-axis\", df.columns)\n violin_y = st.selectbox(\"Select a numerical column for Y-axis\", df.columns)\n sns.violinplot(data=df, x=violin_x, y=violin_y)\n st.pyplot()\n\n # Generated Violin Plot Code\n st.write(\"**Generated Violin Plot Code**\")\n violin_code = f\"\"\"\n violin_x = '{violin_x}'\n violin_y = '{violin_y}'\n sns.violinplot(data=df, x='{violin_x}', y='{violin_y}')\n plt.show()\n \"\"\"\n st.code(violin_code, language=\"python\")\n\nif __name__ == \"__main__\":\n main()\n\nlink = \"Created by: [Gideon Ogunbanjo](https://gideonogunbanjo.netlify.app) and [Stephanie Michael](https://nwandomichael.netlify.app)\"\nst.markdown(link, unsafe_allow_html=True)\n","repo_name":"gideon-ogunbanjo/GraphGia","sub_path":"App/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22653605839","text":"#!/usr/bin/env python3\n#receiver.py\n'''\nALGO TO RECEIVE MESSAGE\n0. Import own private key and sender public key\n\tto import your own private key, you must read in the file and use the password\n\tin order to import the RSA private key\n1. Read the message, parse the content\n2. Verify sequence number is greater than rcvstate\n3. Verify signature using sender's pubkey\n4. Get shared key, use your own privkey to decrypt shared key\n5. Decrypt the content of the message using the shared key\n6. 
Update the rcv state\n'''\n\nimport os, sys, getopt, time\nfrom netinterface import network_interface\nfrom decrypt import decrypt_message\nfrom Crypto.Cipher import AES\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Hash import SHA256\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.Signature import PKCS1_v1_5\nfrom util import *\n\nPARTICIPANT_LIST = ''\npubkey_list_address = 'SETUP/pubkey_list.txt'\nPASS= ''\n\ndef get_sender(statefile):\n\tifile = open(statefile,'r')\n\tline = ifile.readline()\n\tmax_sqn = line[len(\"sqn: \"):]\n\tifile.close()\n\tdirectory = os.listdir(\"./\" + OWN_ADDR+\"/IN/\")\n\n\tfor f in directory:\n\t\tif f[0:4] == max_sqn:\n\t\t\treturn f[6:7]\n\n# ------------ \n# main program\n# ------------\n\ntry:\n\topts, args = getopt.getopt(sys.argv[1:], shortopts='hp:a:k:l:', longopts=['help', 'path=', 'addr='])\nexcept getopt.GetoptError:\n\tprint('Usage: python receiver.py -p <network path> -a <own address> -k <password> -l <participant list>')\n\tsys.exit(1)\n\nfor opt, arg in opts:\n\tif opt == '-h' or opt == '--help':\n\t\tprint('Usage: python receiver.py -p <network path> -a <own address> -k <password> -l <participant list>')\n\t\tsys.exit(0)\n\telif opt == '-p' or opt == '--path':\n\t\tNET_PATH = arg\n\telif opt == '-a' or opt == '--addr':\n\t\tOWN_ADDR = arg\n\telif opt == '-k':\n\t\tPASS = arg\n\telif opt == '-l':\n\t\tPARTICIPANT_LIST = arg\n\nif (NET_PATH[-1] != '/') and (NET_PATH[-1] != '\\\\'): NET_PATH += '/'\n\nif not os.access(NET_PATH, os.F_OK):\n\tprint('Error: Cannot access path ' + NET_PATH)\n\tsys.exit(1)\n\nif len(OWN_ADDR) > 1: OWN_ADDR = OWN_ADDR[0]\n\nif OWN_ADDR not in network_interface.addr_space:\n\tprint('Error: Invalid address ' + OWN_ADDR)\n\tsys.exit(1)\n\n# main loop\nnetif = network_interface(NET_PATH, OWN_ADDR)\n\nprint('Main loop started...')\n\nwhile True:\n\t# if KeyboardInterrupt:\n\t# \tprint('Interrupted')\n\t# \tclean()\n\t# \ttry:\n\t# \t\tsys.exit(0)\n\t# \texcept SystemExit:\n\t# \t\tos._exit(0)\n\t\n\n# Calling receive_msg() in non-blocking mode ... 
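(returns immediately;\n# when nothing is waiting, status is False and the loop just sleeps briefly below)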
\n\tstatus, msg = netif.receive_msg(blocking=False)\n\t\n\tif status:\n\t\tprivkey_file = \"SETUP/rsa_privkey_\" + OWN_ADDR + \".pem\"\n\n\t\tf = open(\"./\"+ OWN_ADDR + \"/shared_key/shared_key.txt\", 'rb')\n\t\tsym_key = f.read()\n\t\tf.close()\n\t\t\n\t\tstate = \"./\"+ OWN_ADDR+\"/state.txt\"\n\t\tsrc = get_sender(state)\n\n\t\tifIncreaseSeq = (src != OWN_ADDR)\n\n\t\t# lookup public key and verify\n\t\tpubkey_read = open(pubkey_list_address, 'r')\n\t\tpubkey = pubkey_read.read()\n\t\tpubkey_read.close()\n\n\t\tfound = False\n\t\town_priv_key = read_priv_key(OWN_ADDR, PASS)\n\n\t\tRSA_cipher = PKCS1_OAEP.new(own_priv_key)\n\t\tshared_key = RSA_cipher.decrypt(sym_key)\n\t\tsender_pub_key = read_public_key(src)\n\n\t\t# msg = decrypt_message(msg, \"./\" + OWN_ADDR + \"/rcvstate.txt\", \"./\" + OWN_ADDR + \"/rsa_pubkey.pem\") \n\t\tprint(src + \": \" + decrypt_message(ifIncreaseSeq, msg, state, shared_key, sender_pub_key)) # if status is True, then a message was returned in msg\n\telse: time.sleep(2) # otherwise msg is empty\n\n# Calling receive_msg() in blocking mode ...\n\tstatus, msg = netif.receive_msg(blocking=True) \n\n\n\t\n\tprivkey_file = \"SETUP/rsa_privkey_\" + OWN_ADDR + \".pem\"\n\n\tf = open(\"./\"+ OWN_ADDR + \"/shared_key/shared_key.txt\", 'rb')\n\tsym_key = f.read()\n\tf.close()\n\t\n\tstate = \"./\"+ OWN_ADDR+\"/state.txt\"\n\tsrc = get_sender(state)\n\t\n\tifIncreaseSeq = (src != OWN_ADDR)\n\n\t# lookup public key and verify\n\tpubkey_read = open(pubkey_list_address, 'r')\n\tpubkey = pubkey_read.read()\n\tpubkey_read.close()\n\n\tfound = False\n\town_priv_key = read_priv_key(OWN_ADDR, PASS)\n\n\tRSA_cipher = PKCS1_OAEP.new(own_priv_key)\n\tshared_key = RSA_cipher.decrypt(sym_key)\n\tsender_pub_key = read_public_key(src)\n    # when returns, status is True and msg contains a message \n\tprint(src +\": \"+ decrypt_message(ifIncreaseSeq,msg, state, shared_key, sender_pub_key))\n\n \n","repo_name":"evcodes/ESCAPE-Secure-Chat","sub_path":"crypto_scripts/netsim/receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"24753893190","text":"# Lay out a triangle formation filled entirely with 1s, then apply the rule to fill in the Pascal values\n# [1] triangle arr with every value set to 1\n# [2] compute the values in the range\n# for i(1, N)\n# for j(1, i)\n# arr[i][j] = arr[i-1][j-1] + arr[i-1][j]\n
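# e.g. N=4 -> [1], [1, 1], [1, 2, 1], [1, 3, 3, 1]\n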
\n\nimport sys\n\nsys.stdin = open(\"s_input.txt\", \"r\")\n\nT = int(input())\n\nfor tck in range(1, T + 1):\n    N = int(input())\n\n    # build a list holding only the values to print, then compute them\n    # [1] create a triangle-shaped arr of all 1s\n    arr = [[1]*(i+1) for i in range(N)]\n\n    for i in range(1, N):\n        for j in range(1, i):\n            arr[i][j] = arr[i - 1][j - 1] + arr[i - 1][j]\n\n    print(f'#{tck}')\n    for lst in arr:\n        print(*lst)\n","repo_name":"HJH13579/Algorithm-daily","sub_path":"2023_02_13/파스칼 삼각형/answer2.py","file_name":"answer2.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"23724995268","text":"#!/usr/bin/env python\n\"\"\"\ns2reader reads and processes Sentinel-2 L1C SAFE archives.\nThis module implements an easy abstraction to the SAFE data format used by the\nSentinel 2 mission of the European Space Agency (ESA)\n\nUSAGE:\nwith s2reader.open_safe(s2_product_path) as s2_product:\n\n    # From Name\n    print s2_product.mission\n    print s2_product.product_level\n    print s2_product.start_time\n    print s2_product.processing_baseline\n    print s2_product.ROO\n    print s2_product.tile_number\n    print s2_product.product_id\n    print s2_product.format\n\n# From explore dataset\n    unzipped_safe = s2reader.extract_all(s2_product_path)\n    explore_data = s2reader.explore_data(unzipped_safe)\n    for product_id, jp2_files in explore_data.iteritems():\n        print product_id\n        print jp2_files.keys() # resolution\n        print jp2_files['R10m'].keys() # bands\n        print jp2_files['R10m']['B08'] # jp2 file path\n\nSentinel-2 bands description \n\n    #Resolution 10\n    # B02 = blue\n    # B03 = green\n    # B04 = red\n    # B08 = nir\n\n    #Resolution 20\n    # B05 = red_edge1\n    # B06 = red_edge2\n    # B07 = red_edge3\n    # B8A = red_edge3\n    # B11 = snow1\n    # B12 = snow2\n\n    # Resolution 60\n    # B01 = AOT = Aerosols\n    # B09 = vapour\n    # B10 = cirrus\n    \n    # Other bands\n    # AOT = Aerosol Optical Thickness map\n    # CLD = Raster mask values range from 0 for high confidence clear sky to 100 for high confidence cloudy\n    # SCL = Scene Classification. The meaning of the values is indicated in the Category Names of the band.\n    # SNW = Raster mask values range from 0 for high confidence NO snow/ice to 100 for high confidence snow/ice\n    # TCI = True Colour Images\n    # WVP = Scene-average Water Vapour map\n\"\"\"\n\nimport os\nimport zipfile\nimport datetime\nimport logging\n\n\ndef open_safe(safe_file):\n    \"\"\"Return a SentinelDataSet object.\"\"\"\n    if os.path.isdir(safe_file) or os.path.isfile(safe_file):\n        return SentinelDataSet(safe_file)\n    else:\n        raise IOError(\"file not found: %s\" % safe_file)\n\n\ndef extract_all(safe_path, dest_dir=None):\n\n    if not dest_dir:\n        dest_dir = os.path.dirname(safe_path)\n\n    filename, extension = os.path.splitext(os.path.normpath(safe_path))\n    if extension in [\".ZIP\", \".zip\"]:\n\n        # Extract file\n        zip_file = zipfile.ZipFile(safe_path, \"r\")\n        zip_file.extractall(dest_dir)\n        # zip_file.extractall(os.path.join(dest_dir, os.path.splitext(os.path.basename(safe_path))[0]) + '.SAFE')\n        zip_file.close()\n\n        if not os.path.isdir(os.path.join(dest_dir, os.path.splitext(os.path.basename(safe_path))[0]) + '.SAFE'):\n            logging.exception('Error extracting sentinel product directory zip file. ' + str(safe_path))\n\n        return os.path.join(dest_dir, os.path.splitext(os.path.basename(safe_path))[0] + '.SAFE')\n\n    elif extension in [\".SAFE\"]:\n        return safe_path\n\n    else:\n        raise IOError(\"only .SAFE folders or zipped .SAFE folders allowed\")\n\n\ndef explore_data(safe_path):\n    \"\"\"Explore the SAFE dataset directly, independently of the manifest XML.\"\"\"\n\n    path = os.path.normpath(safe_path)\n    filename, extension = os.path.splitext(os.path.normpath(path))\n\n    if extension in [\".ZIP\", \".zip\"]:\n        raise Exception('Explore data only works for unzipped SAFE format, ' +\n                        'Unzip first using \"extract_all\" function')\n\n    data = {}\n
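    # result maps product_id -> resolution folder (e.g. 'R10m') -> band -> jp2 path\n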
\"R20m\")\n for res_dir in os.listdir(img_data_dir):\n data[product_dir][res_dir] = {}\n\n for file_name in os.listdir(os.path.join(img_data_dir, res_dir)):\n if os.path.splitext(file_name)[1].lower() == \".jp2\":\n\n # L2A_T29UNU_20170717T113321_AOT_10m.jp2\n image_name_tokens = str(os.path.basename(file_name)).split(\"_\")\n # product_level = image_name_tokens[0]\n # tile = image_name_tokens[1]\n # start_time = image_name_tokens[2]\n band = image_name_tokens[3]\n # resolution = os.path.basename(image_name_tokens[4])[0]\n # resolution = \"\".join([str(s) for s in resolution.split() if s.isdigit()])\n # format_type = os.path.basename(image_name_tokens[4])[1]\n\n data[product_dir][res_dir][band] = os.path.join(\n os.path.join(img_data_dir, res_dir), file_name)\n\n else:\n logging.exception('Image data not found for product: ' + str(product_dir))\n\n if len(data.keys()[0]) == 0:\n logging.exception('No Sentinel .jp2 images found ' + str(data.keys()))\n\n else:\n return data\n\n\nclass SentinelDataSet(object):\n \"\"\"\n Return SentinelDataSet object.\n This object contains relevant metadata from the SAFE file and its\n containing granules as SentinelGranule() object.\n \"\"\"\n\n def __init__(self, path):\n \"\"\"Assert correct path and initialize.\"\"\"\n filename, extension = os.path.splitext(os.path.normpath(path))\n if extension not in [\".SAFE\", \".ZIP\", \".zip\"]:\n raise IOError(\"only .SAFE folders or zipped .SAFE folders allowed\")\n self.is_zip = True if extension in [\".ZIP\", \".zip\"] else False\n self.path = os.path.normpath(path)\n\n # Read attributes in the name\n # MMM_MSIL1C_YYYYMMDDHHMMSS_Nxxyy_ROOO_Txxxxx_Product Discriminator.SAFE\n\n s2_product_tokens = str(os.path.basename(self.path)).split(\"_\")\n\n self.mission = s2_product_tokens[0]\n self.product_level = s2_product_tokens[1]\n self.start_time = s2_product_tokens[2]\n self.start_time = datetime.datetime.strptime(s2_product_tokens[2], '%Y%m%dT%H%M%S')\n self.processing_baseline = s2_product_tokens[3]\n self.ROO = s2_product_tokens[4]\n self.tile_number = s2_product_tokens[5]\n self.product_id = os.path.splitext(s2_product_tokens[6])[0]\n self.format = os.path.splitext(s2_product_tokens[6])[1]\n\n # Product level\n # if os.path.join(self._zip_root, \"L2A_Manifest.xml\") in self._zipfile.namelist() or\n # self.manifest_safe_path = os.path.join(self.path, \"L2A_Manifest.xml\"):\n if self.product_level == \"MSIL2A\":\n self.manifest_safe_path = 'L2A_Manifest.xml'\n\n # elif os.path.join(self._zip_root, \"manifest.safe\") in self._zipfile.namelist() or\n # self.manifest_safe_path = os.path.join(self.path, \"manifest.safe\")\n elif self.product_level == \"MSIL1C\":\n self.manifest_safe_path = 'manifest.safe'\n\n else:\n raise Exception(\"Product level not supported. 
        # Product level\n        # if os.path.join(self._zip_root, \"L2A_Manifest.xml\") in self._zipfile.namelist() or\n        # self.manifest_safe_path = os.path.join(self.path, \"L2A_Manifest.xml\"):\n        if self.product_level == \"MSIL2A\":\n            self.manifest_safe_path = 'L2A_Manifest.xml'\n\n        # elif os.path.join(self._zip_root, \"manifest.safe\") in self._zipfile.namelist() or\n        # self.manifest_safe_path = os.path.join(self.path, \"manifest.safe\")\n        elif self.product_level == \"MSIL1C\":\n            self.manifest_safe_path = 'manifest.safe'\n\n        else:\n            raise Exception(\"Product level not supported. Only 'MSIL1C' and 'MSIL2A' levels are supported.\")\n\n        # Get manifest\n        if self.is_zip:\n            self._zipfile = zipfile.ZipFile(self.path, 'r')\n            self._zip_root = os.path.basename(filename)\n            if self._zip_root not in self._zipfile.namelist():\n                if not filename.endswith(\".SAFE\"):\n                    self._zip_root = os.path.basename(filename) + \".SAFE/\"\n                else:\n                    self._zip_root = os.path.basename(filename) + \"/\"\n                if self._zip_root not in self._zipfile.namelist():\n                    logging.exception(\"unknown zipfile structure\")\n\n            self.manifest_safe_path = os.path.join(self._zip_root, self.manifest_safe_path)\n\n        else:\n            self._zipfile = None\n            self._zip_root = None\n            # Find manifest.safe.\n            self.manifest_safe_path = os.path.join(self.path, self.manifest_safe_path)\n\n        if not os.path.isfile(self.manifest_safe_path) and self.manifest_safe_path not in self._zipfile.namelist():\n            raise Exception(\"manifest.safe not found: %s\" % self.manifest_safe_path)\n\n    def __enter__(self):\n        \"\"\"Return self.\"\"\"\n        return self\n\n    def __exit__(self, t, v, tb):\n        \"\"\"Do cleanup.\"\"\"\n        try:\n            self._zipfile.close()\n        except AttributeError:\n            pass\n","repo_name":"TreeMetrics/ForestChange","sub_path":"forest_change/spatial/s2reader.py","file_name":"s2reader.py","file_ext":"py","file_size_in_byte":9128,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"39804604674","text":"import time\n\nfrom bus.update_data import main_update_data\n\n\ndef prepare_update_func():\n    main_update_data()\n\n\ndef prepare_find_ticker_signals():\n    main_find_ticker_signals()\n\n\ndef create_principal_menu():\n    clear_screen()\n    print(\"----------------------\")\n    print(\"MENU\")\n    print(\"----------------------\")\n    print(\"1 - UPDATE ASSETS\")\n    print(\"2 - SEARCH FOR SIGNALS\")\n    print(\"----------------------\")\n    option = input(\"Enter the desired option:\\n\")\n\n    if option.isdigit():\n        if int(option) == 1:\n            prepare_update_func()\n        elif int(option) == 2:\n            prepare_find_ticker_signals()\n        elif int(option) == 9:\n            create_principal_menu()\n        else:\n            selected_option_error(create_principal_menu)\n    else:\n        selected_option_error(create_principal_menu)\n\n\ndef clear_screen():\n    lines = 100\n    print(\"\\n\" * lines)\n\n    '''\n    try:\n        lines = os.get_terminal_size().lines\n    except AttributeError:\n        lines = 130\n    print(\"\\n\" * lines)\n    '''\n\n\ndef selected_option_error(func):\n    print(\"Invalid option, try again\")\n    time.sleep(2)\n    func()","repo_name":"GabrielSalazar/ProjetoFinal","sub_path":"common/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"5288770747","text":"import tempfile\nimport unittest\nfrom pathlib import Path\n\nimport gymnasium as gym\nimport numpy as np\nimport pandas as pd\n\nfrom dacbench.agents import StaticAgent\nfrom dacbench.benchmarks import (\n    CMAESBenchmark,\n    FastDownwardBenchmark,\n    LubyBenchmark,\n    ModCMABenchmark,\n)\nfrom dacbench.logger import Logger, load_logs, log2dataframe\nfrom dacbench.runner import run_benchmark\nfrom dacbench.wrappers import ActionFrequencyWrapper\n\n\nclass TestActionTrackingWrapper(unittest.TestCase):\n    def test_logging_multi_discrete(self):\n        temp_dir = tempfile.TemporaryDirectory()\n\n        seed = 0\n        logger = Logger(\n            output_path=Path(temp_dir.name),\n            experiment_name=\"test_multi_discrete_logging\",\n            step_write_frequency=None,\n            episode_write_frequency=1,\n        )\n\n        bench = ModCMABenchmark()\n        
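# seed the benchmark and its action space so the sampled static action is reproducible\n        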
bench.set_seed(seed)\n        env = bench.get_environment()\n        env.action_space.seed(seed)\n        action_logger = logger.add_module(ActionFrequencyWrapper)\n        wrapped = ActionFrequencyWrapper(env, logger=action_logger)\n        action = env.action_space.sample()\n        agent = StaticAgent(env, action)\n        logger.set_env(env)\n\n        run_benchmark(wrapped, agent, 1, logger)\n        action_logger.close()\n\n        logs = load_logs(action_logger.get_logfile())\n        dataframe = log2dataframe(logs, wide=True)\n\n        expected_actions = pd.DataFrame(\n            {\n                \"action_0\": [action[0]] * 10,\n                \"action_1\": [action[1]] * 10,\n                \"action_10\": [action[10]] * 10,\n                \"action_2\": [action[2]] * 10,\n                \"action_3\": [action[3]] * 10,\n                \"action_4\": [action[4]] * 10,\n                \"action_5\": [action[5]] * 10,\n                \"action_6\": [action[6]] * 10,\n                \"action_7\": [action[7]] * 10,\n                \"action_8\": [action[8]] * 10,\n                \"action_9\": [action[9]] * 10,\n            }\n        )\n\n        for column in expected_actions.columns:\n            # todo: seems to be a bug here. Every so often the last action is missing.\n            # Double checked not a logging problem. Could be a seeding issue\n            self.assertListEqual(\n                dataframe[column].to_list()[:10],\n                expected_actions[column].to_list()[:10],\n                f\"Column {column}\",\n            )\n\n        temp_dir.cleanup()\n\n    def test_logging_discrete(self):\n        temp_dir = tempfile.TemporaryDirectory()\n\n        seed = 0\n        logger = Logger(\n            output_path=Path(temp_dir.name),\n            experiment_name=\"test_discrete_logging\",\n            step_write_frequency=None,\n            episode_write_frequency=1,\n        )\n\n        bench = LubyBenchmark()\n        bench.set_seed(seed)\n        env = bench.get_environment()\n        env.action_space.seed(seed)\n\n        action_logger = logger.add_module(ActionFrequencyWrapper)\n        wrapped = ActionFrequencyWrapper(env, logger=action_logger)\n        action = env.action_space.sample()\n        agent = StaticAgent(env, action)\n        logger.set_env(env)\n\n        run_benchmark(wrapped, agent, 10, logger)\n        action_logger.close()\n\n        logs = load_logs(action_logger.get_logfile())\n        dataframe = log2dataframe(logs, wide=True)\n\n        expected_actions = [action] * 80\n\n        self.assertListEqual(dataframe.action.to_list(), expected_actions)\n\n        temp_dir.cleanup()\n\n    def test_init(self):\n        bench = LubyBenchmark()\n        env = bench.get_environment()\n        wrapped = ActionFrequencyWrapper(env)\n        self.assertTrue(len(wrapped.overall_actions) == 0)\n        self.assertTrue(wrapped.action_interval is None)\n        wrapped.instance = [0]\n        self.assertTrue(wrapped.instance[0] == 0)\n\n        wrapped2 = ActionFrequencyWrapper(env, 10)\n        self.assertTrue(len(wrapped2.overall_actions) == 0)\n        self.assertTrue(wrapped2.action_interval == 10)\n        self.assertTrue(len(wrapped2.action_intervals) == 0)\n        self.assertTrue(len(wrapped2.current_actions) == 0)\n\n    def test_step(self):\n        bench = LubyBenchmark()\n        env = bench.get_environment()\n        wrapped = ActionFrequencyWrapper(env, 10)\n\n        state, info = wrapped.reset()\n        self.assertTrue(issubclass(type(info), dict))\n        self.assertTrue(len(state) > 1)\n\n        state, reward, terminated, truncated, _ = wrapped.step(1)\n        self.assertTrue(len(state) > 1)\n        self.assertTrue(reward <= 0)\n        self.assertFalse(terminated)\n        self.assertFalse(truncated)\n\n        self.assertTrue(len(wrapped.overall_actions) == 1)\n        self.assertTrue(wrapped.overall_actions[0] == 1)\n        self.assertTrue(len(wrapped.current_actions) == 1)\n        self.assertTrue(wrapped.current_actions[0] == 1)\n        self.assertTrue(len(wrapped.action_intervals) == 0)\n\n    def test_get_actions(self):\n        bench = LubyBenchmark()\n        env = bench.get_environment()\n        wrapped = ActionFrequencyWrapper(env)\n        wrapped.reset()\n        for i in range(5):\n            wrapped.step(i)\n        
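# an interval of 2 makes the wrapper additionally group actions into per-interval lists\n        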
wrapped2 = ActionFrequencyWrapper(env, 2)\n wrapped2.reset()\n for i in range(5):\n wrapped2.step(i)\n\n overall_actions_only = wrapped.get_actions()\n overall_actions, intervals = wrapped2.get_actions()\n self.assertTrue(np.array_equal(overall_actions, overall_actions_only))\n self.assertTrue(overall_actions_only == [0, 1, 2, 3, 4])\n\n self.assertTrue(len(intervals) == 3)\n self.assertTrue(len(intervals[0]) == 2)\n self.assertTrue(intervals[0] == [0, 1])\n self.assertTrue(len(intervals[1]) == 2)\n self.assertTrue(intervals[1] == [2, 3])\n self.assertTrue(len(intervals[2]) == 1)\n self.assertTrue(intervals[2] == [4])\n\n def test_rendering(self):\n bench = FastDownwardBenchmark()\n env = bench.get_environment()\n wrapped = ActionFrequencyWrapper(env, 2)\n wrapped.reset()\n for _ in range(10):\n wrapped.step(1)\n img = wrapped.render_action_tracking()\n self.assertTrue(img.shape[-1] == 3)\n\n bench = CMAESBenchmark()\n env = bench.get_environment()\n wrapped = ActionFrequencyWrapper(env, 2)\n wrapped.reset()\n wrapped.step(np.ones(10))\n img = wrapped.render_action_tracking()\n self.assertTrue(img.shape[-1] == 3)\n\n class dict_action_env:\n def __init__(self):\n self.action_space = gym.spaces.Dict(\n {\n \"one\": gym.spaces.Discrete(2),\n \"two\": gym.spaces.Box(\n low=np.array([-1, 1]), high=np.array([1, 5])\n ),\n }\n )\n self.observation_space = gym.spaces.Discrete(2)\n self.reward_range = (1, 2)\n self.metadata = {}\n\n def reset(self):\n return 1, {}\n\n def step(self, action):\n return 1, 1, 1, 1, {}\n\n env = dict_action_env()\n wrapped = ActionFrequencyWrapper(env)\n wrapped.reset()\n with self.assertRaises(NotImplementedError):\n wrapped.render_action_tracking()\n\n class tuple_action_env:\n def __init__(self):\n self.action_space = gym.spaces.Tuple(\n (\n gym.spaces.Discrete(2),\n gym.spaces.Box(low=np.array([-1, 1]), high=np.array([1, 5])),\n )\n )\n self.observation_space = gym.spaces.Discrete(2)\n self.reward_range = (1, 2)\n self.metadata = {}\n\n def reset(self):\n return 1, {}\n\n def step(self, action):\n return 1, 1, 1, 1, {}\n\n env = tuple_action_env()\n wrapped = ActionFrequencyWrapper(env)\n wrapped.reset()\n with self.assertRaises(NotImplementedError):\n wrapped.render_action_tracking()\n\n class multi_discrete_action_env:\n def __init__(self):\n self.action_space = gym.spaces.MultiDiscrete([2, 3])\n self.observation_space = gym.spaces.Discrete(2)\n self.reward_range = (1, 2)\n self.metadata = {}\n\n def reset(self):\n return 1, {}\n\n def step(self, action):\n return 1, 1, 1, 1, {}\n\n env = multi_discrete_action_env()\n wrapped = ActionFrequencyWrapper(env, 5)\n wrapped.reset()\n for _ in range(10):\n wrapped.step([1, 2])\n img = wrapped.render_action_tracking()\n self.assertTrue(img.shape[-1] == 3)\n\n class multi_binary_action_env:\n def __init__(self):\n self.action_space = gym.spaces.MultiBinary(2)\n self.observation_space = gym.spaces.Discrete(2)\n self.reward_range = (1, 2)\n self.metadata = {}\n\n def reset(self):\n return 1, {}\n\n def step(self, action):\n return 1, 1, 1, 1, {}\n\n env = multi_binary_action_env()\n wrapped = ActionFrequencyWrapper(env)\n wrapped.reset()\n wrapped.step([1, 0])\n img = wrapped.render_action_tracking()\n self.assertTrue(img.shape[-1] == 3)\n\n class large_action_env:\n def __init__(self):\n self.action_space = gym.spaces.Box(low=np.zeros(15), high=np.ones(15))\n self.observation_space = gym.spaces.Discrete(2)\n self.reward_range = (1, 2)\n self.metadata = {}\n\n def reset(self):\n return 1, {}\n\n def step(self, action):\n 
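# dummy gymnasium-style transition: (obs, reward, terminated, truncated, info)\n                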
return 1, 1, 1, 1, {}\n\n        env = large_action_env()\n        wrapped = ActionFrequencyWrapper(env)\n        wrapped.reset()\n        wrapped.step(0.5 * np.ones(15))\n        img = wrapped.render_action_tracking()\n        self.assertTrue(img.shape[-1] == 3)\n","repo_name":"automl/DACBench","sub_path":"tests/wrappers/test_action_tracking_wrapper.py","file_name":"test_action_tracking_wrapper.py","file_ext":"py","file_size_in_byte":10052,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"}
+{"seq_id":"5866257253","text":"import numpy as np\r\nimport copy\r\n\r\n\r\nclass Optimization:\r\n    def __init__(self, optimizer, function, early_stop=0.00001, iter_max=10000):\r\n        self.function = function\r\n        self.early_stop = early_stop\r\n        self.iter_max = iter_max\r\n\r\n        self.optimizer_list = list()\r\n\r\n        for i in range(self.function.dim):\r\n            self.optimizer_list.append(copy.copy(optimizer))\r\n\r\n    def optimize(self, init):\r\n        val_prev = np.array(init, dtype=float)\r\n        val_new = np.array(init, dtype=float)\r\n        iter_X = list()\r\n        iter_Y = list()\r\n\r\n        iter_X.append(init)\r\n        iter_Y.append(self.function.get_value(init))\r\n\r\n        iter_cnt = 0\r\n        for iter_cnt in range(self.iter_max):\r\n\r\n            gradient = self.function.get_diff_value(val_prev)\r\n            for i in range(len(self.optimizer_list)):\r\n                val_new[i] = self.optimizer_list[i].update(val_prev[i], gradient[i])\r\n            iter_X.append(val_new.copy())\r\n            iter_Y.append(self.function.get_value(val_new))\r\n            diff = abs(self.function.get_value(val_new) - self.function.get_value(val_prev))\r\n\r\n            if diff < self.early_stop or np.isnan(diff):\r\n                break\r\n            else:\r\n                val_prev = val_new.copy()\r\n\r\n        return iter_X[-1], iter_X, iter_Y, iter_cnt\r\n","repo_name":"Kyuhyun-Cho/Optimization_NumericalAnalysis_KMU","sub_path":"SGD/Optimization/Optimization.py","file_name":"Optimization.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"74267376904","text":"import sys\nimport csv\nfrom collections import defaultdict\n\nfname = sys.argv[1]\nwith open(fname, \"r\") as fh:\n    table = [row for row in csv.reader(fh)]\nprint(\"Reservation Table: \")\nprint(*(\" \".join(row) for row in table), sep='\\n')\n\ndef get_forbidden_latencies(stage):\n    return set(j-i for i in range(len(stage)-1)\n               for j in range(i+1, len(stage)) if stage[i] == stage[j] == 'x')\n\nforbid_lats = set(lats for stage in table for lats in get_forbidden_latencies(stage))\nprint(\"Forbidden Latencies: %r\"%forbid_lats)\n\nmax_forb = 0\ncoll_vec = 0\nfor lat in forbid_lats:\n    coll_vec |= 1 << (lat-1)\n    max_forb = max(max_forb, lat)\nprint(\"Collision Vector: {0:b} (={0})\".format(coll_vec))\n\n# build graph\ngraph = defaultdict(list)\ndone = set()\nnodes = {coll_vec}\nwhile True:\n    for u in nodes:\n        if u not in done:\n            curr = u\n            break\n    else:\n        break\n    for i in range(max_forb+1):\n        if not (curr >> i) & 1:\n            state = (curr >> (i+1)) | coll_vec\n            graph[curr].append((state, i+1))\n            nodes.add(state)\n    done.add(curr)\nprint(\"States:\\n\", *(\"{0:b}\".format(i) for i in nodes))\nprint(\"\\nState Transition Graph: \")\nfor u in graph:\n    for v, w in graph[u]:\n        print(\"{0:b} -> {1:b} : {2}\".format(u, v, w))\n\ndef find_cycles(G, u, cycle):\n    Cycles = []\n    for v, w in G[u]:\n        if v == cycle[0][0]:\n            Cycles.append(cycle + [(v, w)])\n        elif not any(v == c[0] for c in cycle):\n            Cycles.extend(c for c in find_cycles(G, v, cycle + [(v, w)]))\n    return Cycles\n\ndef is_rotated(c1, c2):\n    c = c1 + c1\n    for i in 
range(len(c)-len(c2)+1):\n if c[i:i+len(c2)] == c2: return True\n return False\n\ngreedy_cycle = None\nMAL = float('inf')\nCycles = {}\ngreedyCycles = {}\nprint(\"\\nSimple Cycles\\tAvg. Latency\")\n\nfor u in nodes:\n greedy_cycle = None\n MAL = float('inf')\n for cyc in find_cycles(graph, u, [(u, 0)]):\n wts = tuple(i[1] for i in cyc[1:])\n avg = sum(wts)/len(wts)\n if any((is_rotated(wts, c) and Cycles[c]==avg) for c in Cycles): continue\n Cycles[wts] = avg\n wts = '('+\", \".join(map(str, wts))+')'\n if avg < MAL:\n MAL = avg\n greedy_cycle = wts\n print(\"{0:^13s}\\t{1:^12.2f}\".format(wts, avg))\n if greedy_cycle is not None: greedyCycles[greedy_cycle] = MAL\n\nMAL = float('inf')\nprint(\"\\nGreedy Cycles\\tAvg. Latency\")\ngreedy_cyc = None\nfor cyc in greedyCycles:\n if greedyCycles[cyc] < MAL:\n MAL = greedyCycles[cyc]\n greedy_cyc = cyc\n print(\"{0:^13s}\\t{1:^12.2f}\".format(cyc, greedyCycles[cyc]))\n\nprint(f\"\\nGreedy Cycle: {greedy_cyc}\\n\"\n f\"MAL: {MAL:.2f}\\n\"\n f\"Throughput: {1/MAL:.2f}\")\n","repo_name":"thevarunsharma/High-Performance-Computing","sub_path":"Reservation-Table-MAL/findMAL.py","file_name":"findMAL.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15478967046","text":"import os\nimport schedule\nimport time\nimport datetime\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'calgary_web_portal.settings')\n\nimport django\ndjango.setup()\n\nfrom email.mime.image import MIMEImage\nfrom django.core.mail import send_mail, EmailMultiAlternatives\nfrom django.conf.global_settings import EMAIL_HOST_USER\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\n\nfrom customer_dashboard.braintree import get_gateway\nfrom customer_dashboard.models import UserProfile, AccountNumber, Transaction\n\n\ndef off_for_children_users(parent_user):\n users = parent_user.children.all()\n print('children', users)\n if users:\n for user in users:\n user.useraccess.key_finder = False\n user.useraccess.door_finder = False\n user.useraccess.save()\n print('parent_user', user)\n usrs = user.children.all()\n if usrs:\n off_for_children_users(user)\n return None\n\n\ndef check_subscription():\n print('Run Check Subscription')\n accounts = AccountNumber.objects.all()\n for account in accounts:\n print('account', account)\n # if subscription is expire today (send mail to primary user and calgary)\n if account.is_registered and account.exp_date and account.exp_date < datetime.date.today():\n # get user profile object\n try:\n user_profile = UserProfile.objects.get(user__user_type='primary', account_number=account)\n except UserProfile.DoesNotExist:\n print('*' * 80)\n print('if', account)\n print('*' * 80)\n else:\n # get user object\n user_access = user_profile.user.useraccess\n # set paid services off\n if user_access.key_finder and user_access.door_finder:\n user_access.key_finder = False\n user_access.door_finder = False\n user_access.save()\n print('parent', user_profile.user)\n off_for_children_users(user_profile.user)\n # send mail to customer\n subject_customer = 'Calgary Lock and Safe'\n message_customer = 'Calgary services expire today'\n\n msg = EmailMultiAlternatives(\n subject_customer,\n render_to_string(\n 'email/service_expire_today.html',\n {\n 'full_name': user_profile.user.full_name(),\n 'account_number': account.account_number,\n 'first_name': user_profile.user.first_name,\n 'last_name': user_profile.user.last_name,\n 'phone': 
user_profile.user.phone,\n                            'file_numbers': user_profile.file_numbers.all()\n                        }\n                    ),\n                    EMAIL_HOST_USER,\n                    [user_profile.user.email]\n                )\n                msg.content_subtype = 'html'  # Main content is text/html\n                msg.mixed_subtype = 'related'  # This is critical, otherwise images will be displayed as attachments\n                with open(settings.STATIC_DIR + '/images/logo.png', 'rb') as f:\n                    file = f.read()\n                image = MIMEImage(file)\n                image.add_header('Content-ID', '')\n                msg.attach(image)\n                msg.send()\n\n                # send mail to calgary\n                subject_calgary = f\"{account.account_number} - {user_profile.user.full_name()}\"\n                message_calgary = f\"{account.account_number} expires today\"\n                send_mail(\n                    subject_calgary, message_calgary, EMAIL_HOST_USER, [settings.EMAIL_TO_CALGARY],\n                    html_message=render_to_string(\n                        'email/service_expire_today_to_calgary.html',\n                        {\n                            'account_number': account.account_number,\n                            'first_name': user_profile.user.first_name,\n                            'last_name': user_profile.user.last_name,\n                            'phone': user_profile.user.phone,\n                            'file_numbers': user_profile.file_numbers.all()\n                        }\n                    )\n                )\n\n        # 7 days before expiration of subscription (send mail to primary user and calgary)\n        elif account.is_registered and account.exp_date - datetime.timedelta(days=7) == datetime.date.today():\n            try:\n                user_profile = UserProfile.objects.get(user__user_type='primary', account_number=account)\n            except UserProfile.DoesNotExist:\n                print('*' * 80)\n                print('else', account)\n                print('*' * 80)\n\n            # send mail to customer\n            subject_customer = 'Calgary Lock and Safe'\n            message_customer = 'Calgary services expire after 7 days'\n\n            msg = EmailMultiAlternatives(\n                subject_customer,\n                render_to_string(\n                    'email/service_expire_after_7_days.html',\n                    {\n                        'full_name': user_profile.user.full_name(),\n                        'account_number': account.account_number,\n                        'expire_date': account.exp_date,\n                        'first_name': user_profile.user.first_name,\n                        'last_name': user_profile.user.last_name,\n                        'phone': user_profile.user.phone,\n                        'file_numbers': user_profile.file_numbers.all()\n                    }\n                ),\n                EMAIL_HOST_USER,\n                [user_profile.user.email],\n            )\n            msg.content_subtype = 'html'  # Main content is text/html\n            msg.mixed_subtype = 'related'  # This is critical, otherwise images will be displayed as attachments\n            with open(settings.STATIC_DIR + '/images/logo.png', 'rb') as f:\n                file = f.read()\n            image = MIMEImage(file)\n            image.add_header('Content-ID', '')\n            msg.attach(image)\n            msg.send()\n\n            # send mail to calgary\n            subject_calgary = f\"{account.account_number} - {user_profile.user.full_name()}\"\n            message_calgary = f\"{account.account_number} expires at {account.exp_date}\"\n            send_mail(\n                subject_calgary, message_calgary, EMAIL_HOST_USER, [settings.EMAIL_TO_CALGARY],\n                html_message=render_to_string(\n                    'email/service_expire_after_7_days_to_calgary.html',\n                    {\n                        'account_number': account.account_number,\n                        'expire_date': account.exp_date,\n                        'first_name': user_profile.user.first_name,\n                        'last_name': user_profile.user.last_name,\n                        'phone': user_profile.user.phone,\n                        'file_numbers': user_profile.file_numbers.all()\n                    }\n                )\n            )\n\n\ndef update_braintree_status():\n    print('run braintree status update code')\n    gateway = get_gateway()\n    payments = Transaction.objects.exclude(bt_status='settled')\n    for payment in payments:\n        try:\n            payment.bt_status = gateway.transaction.find(payment.transaction).status\n            payment.save()\n        except Exception as e:\n            print('*' * 80)\n            print(e)\n            print('*' * 80)\n            pass\n\n\ndef run_task():\n    print('start run')\n    schedule.every().day.at(\"00:05\").do(check_subscription)\n    
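# refresh Braintree transaction statuses every minute\n    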
schedule.every().minute.do(update_braintree_status)\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\nif __name__ == '__main__':\n run_task()\n","repo_name":"shakeelrauf/cls-customer-backend","sub_path":"chk_subs.py","file_name":"chk_subs.py","file_ext":"py","file_size_in_byte":8108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2179628391","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n # Basically implement a BFS\n res = []\n queue = collections.deque() \n queue.append(root)\n while queue:\n level = []\n qlen = len(queue)\n for i in range(qlen):\n curNode = queue.popleft()\n if curNode:\n level.append(curNode.val)\n queue.append(curNode.left)\n queue.append(curNode.right)\n if level:\n res.append(level)\n return res \n","repo_name":"samkim777/Leetcode-Solns","sub_path":"Solutions/Medium/102 Binary tree level order traversal.py","file_name":"102 Binary tree level order traversal.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17444875539","text":"import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision.transforms import ToTensor\n\nbatch_size = 64\n\n# Get cpu, gpu or mps device for training.\ndevice = (\n \"cuda\"\n if torch.cuda.is_available()\n else \"mps\"\n if torch.backends.mps.is_available()\n else \"cpu\"\n)\nprint(f\"Using {device} device\")\n\n\ntest_data = datasets.FashionMNIST(\n root=\"data\",\n train=False,\n download=True,\n transform=ToTensor(),\n)\ntest_dataloader = DataLoader(test_data, batch_size=batch_size)\nprint(test_data)\n\n\n# Define model\nclass NeuralNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(28*28, 512),\n nn.ReLU(),\n nn.Linear(512, 512),\n nn.ReLU(),\n nn.Linear(512, 10)\n )\n\n def forward(self, x):\n x = self.flatten(x)\n logits = self.linear_relu_stack(x)\n return logits\n\n\nmodel = NeuralNetwork().to(device)\nmodel.load_state_dict(torch.load(\"model.pth\"))\n\n\nclasses = [\n \"T-shirt/top\",\n \"Trouser\",\n \"Pullover\",\n \"Dress\",\n \"Coat\",\n \"Sandal\",\n \"Shirt\",\n \"Sneaker\",\n \"Bag\",\n \"Ankle boot\",\n]\n\nmodel.eval()\nx, y = test_data[0][0], test_data[0][1]\nwith torch.no_grad():\n x = x.to(device)\n pred = model(x)\n predicted, actual = classes[pred[0].argmax(0)], classes[y]\n print(f'Predicted: \"{predicted}\", Actual: \"{actual}\"')\n","repo_name":"silver6wings/MLFlowSample","sub_path":"src/pytorch/model_testing.py","file_name":"model_testing.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26297132826","text":"import os\nimport torch\nimport shutil\nimport h5py\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom utils.data_container import get_data_loader_base\nfrom models.model import create_model\nfrom utils.util import get_optimizer, get_scheduler\nfrom utils.train import train_decompose\nfrom utils import normalization\n\n\ndef preprocessing(base_model_name,\n conf,\n loss,\n graph,\n data_category,\n device,\n data_set,\n optimizer_name,\n 
scheduler_name):\n if base_model_name == 'LinearDecompose':\n data_loader = get_data_loader_base(base_model_name=base_model_name, dataset=conf['data']['dataset'],\n batch_size=conf['batch_size_base'],\n _len=conf['data']['_len'], data_category=data_category, device=device,\n Normal_Method=conf['data']['Normal_Method'])\n model, trainer = create_model(base_model_name, loss, conf['Base'][base_model_name], data_category, device,\n graph)\n save_folder = os.path.join('saves', f\"{conf['name']}_{base_model_name}\", f'{data_set}_{\"\".join(data_category)}')\n run_folder = os.path.join('run', f\"{conf['name']}_{base_model_name}\", f'{data_set}_{\"\".join(data_category)}')\n optimizer = get_optimizer(optimizer_name, model.parameters(), conf['optimizerbase'][optimizer_name]['lr'])\n scheduler = get_scheduler(scheduler_name, optimizer, **conf['scheduler'][scheduler_name])\n shutil.rmtree(save_folder, ignore_errors=True)\n os.makedirs(save_folder)\n shutil.rmtree(run_folder, ignore_errors=True)\n os.makedirs(run_folder)\n model = train_decompose(model=model,\n dataloaders=data_loader,\n trainer=trainer,\n optimizer=optimizer,\n scheduler=scheduler,\n folder=save_folder,\n tensorboard_floder=run_folder,\n device=device,\n **conf['train'])\n model.load_state_dict(torch.load(f\"{os.path.join(save_folder, 'best_model.pkl')}\")['model_state_dict'])\n return model.encoder, model.decoder\n if base_model_name == 'SvdDecompose':\n data = get_data_loader_base(base_model_name=base_model_name, dataset=conf['data']['dataset'],\n batch_size=conf['batch_size_base'],\n _len=conf['data']['_len'], data_category=data_category, device=device,\n Normal_Method=conf['data']['Normal_Method'])\n data = torch.from_numpy(data).float().to(device)\n save_folder = os.path.join('saves', f\"{conf['name']}_{base_model_name}\", f'{data_set}_{\"\".join(data_category)}')\n run_folder = os.path.join('run', f\"{conf['name']}_{base_model_name}\", f'{data_set}_{\"\".join(data_category)}')\n model, trainer = create_model(base_model_name, loss, conf['Base'][base_model_name], data_category, device,\n graph)\n shutil.rmtree(save_folder, ignore_errors=True)\n os.makedirs(save_folder)\n shutil.rmtree(run_folder, ignore_errors=True)\n os.makedirs(run_folder)\n model.decompose(data)\n return model.encoder, model.decoder\n\n\ndef preprocessing_for_metric(data_category: list,\n dataset:str,\n method:str,\n hidden_size:int,\n Normal_Method: str,\n _len: list):\n data = []\n normal_method = getattr(normalization, Normal_Method)\n for category in data_category:\n normal = normal_method()\n with h5py.File(f\"data/{dataset}/{category}_data.h5\", 'r') as hf:\n data_pick = hf[f'{category}_pick'][:]\n with h5py.File(f\"data/{dataset}/{category}_data.h5\", 'r') as hf:\n data_drop = hf[f'{category}_drop'][:]\n data.append(normal.fit_transform(np.stack([data_pick, data_drop], axis=2)))\n\n\n data = np.concatenate(data, axis=1).transpose((0,2,1))\n data = data[:-(_len[0]+_len[1])]\n T, input_dim, N = data.shape\n inputs = data.reshape(-1, N)\n u, s, v = np.linalg.svd(inputs)\n w = np.diag(s[:hidden_size]).dot(v[:hidden_size,:]).T\n\n support = None\n if method == 'big':\n graph = cdist(w, w, metric='euclidean')\n # support = cdist(w, w, metric='correlation')\n # support[support<=0.75] = 0\n # support[support>0.75] = 1\n # s,v,d = np.linalg.svd(graph)\n # print(v)\n support = graph * -1 / np.std(graph) ** 2\n support = np.exp(support)\n # s,v,d = np.linalg.svd(support)\n # print(support)\n # print(v)\n elif method == 'small':\n support = w\n print(w.shape)\n 
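# 'big' yields a dense node-affinity matrix built from the SVD factors; 'small' yields the factors themselves\n    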
return support\n","repo_name":"gpxlcj/CGCDemandPrediction","sub_path":"utils/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
+{"seq_id":"28036277098","text":"from typing import Dict, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\nfrom torch import Tensor\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\nAA_VOCAB = {\n    \"#\": 0,\n    \"A\": 1,\n    \"R\": 2,\n    \"N\": 3,\n    \"D\": 4,\n    \"C\": 5,\n    \"Q\": 6,\n    \"E\": 7,\n    \"G\": 8,\n    \"H\": 9,\n    \"I\": 10,\n    \"L\": 11,\n    \"K\": 12,\n    \"M\": 13,\n    \"F\": 14,\n    \"P\": 15,\n    \"S\": 16,\n    \"T\": 17,\n    \"W\": 18,\n    \"Y\": 19,\n    \"V\": 20,\n    \"X\": 21,\n    \"-\": 22,\n    \"O\": 23,\n    \"*\": 24,\n}\nRELEVANT_VIRUSES = {\"SARS-CoV1\", \"SARS-CoV2\"}\nRELEVANT_KEYS = {\n    \"Neutralising Vs\",\n    \"Not Neutralising Vs\",\n    \"Binds to\",\n    \"Doesn't Bind to\",\n}\nTYPE_MAP = {\n    \"S1; non-RBD\": \"ntd\",\n    \"S2 (quaternary glycan epitope)\": \"s2\",\n    \"S: NTD\": \"ntd\",\n    \"S: RBD\": \"rbd\",\n    \"S; NTD\": \"ntd\",\n    \"S; Possibly RBD\": \"rbd\",\n    \"S; RBD\": \"rbd\",\n    \"S; RBD/non-RBD\": \"unk\",\n    \"S; S1\": \"unk\",\n    \"S; S1 non-RBD\": \"ntd\",\n    \"S; S1/S2\": \"unk\",\n    \"S; S1/S2 Cleavage Site\": \"unk\",\n    \"S; S2\": \"s2\",\n    \"S; S2 (quaternary glycan epitope)\": \"s2\",\n    \"S; S2 Stem Helix\": \"s2\",\n    \"S; Unk\": \"unk\",\n    \"S; non-RBD\": \"unk\",\n    \"S; non-S1\": \"s2\",\n    \"S; probably RBD (implied by clustering)\": \"rbd\",\n}\n\n\nclass RNNEncoder(nn.Module):\n    \"\"\"Implements a multi-layer RNN.\n\n    This module can be used to create multi-layer RNN models, and\n    provides a way to reduce the output of the RNN to a single hidden\n    state by pooling the encoder states either by taking the maximum,\n    average, or by taking the last hidden state before padding.\n    Padding is dealt with by using torch's PackedSequence.\n\n    Attributes\n    ----------\n    rnn: nn.Module\n        The rnn submodule\n\n    \"\"\"\n\n    def __init__(\n        self,\n        input_size: int,\n        hidden_size: int,\n        n_layers: int = 1,\n        rnn_type: str = \"lstm\",\n        dropout: float = 0,\n        attn_dropout: float = 0,\n        attn_heads: int = 1,\n        bidirectional: bool = False,\n        layer_norm: bool = False,\n        highway_bias: float = -2,\n        rescale: bool = True,\n        enforce_sorted: bool = False,\n        **kwargs,\n    ) -> None:\n        \"\"\"Initializes the RNNEncoder object.\n        Parameters\n        ----------\n        input_size : int\n            The dimension of the input data\n        hidden_size : int\n            The hidden dimension to encode the data in\n        n_layers : int, optional\n            The number of rnn layers, defaults to 1\n        rnn_type : str, optional\n            The type of rnn cell, one of: `lstm`, `gru`, `sru`\n            defaults to `lstm`\n        dropout : float, optional\n            Amount of dropout to use between RNN layers, defaults to 0\n        bidirectional : bool, optional\n            Set to use a bidirectional encoder, defaults to False\n        layer_norm : bool, optional\n            [SRU only] whether to use layer norm\n        highway_bias : float, optional\n            [SRU only] value to use for the highway bias\n        rescale : bool, optional\n            [SRU only] whether to use rescaling\n        enforce_sorted: bool\n            Whether rnn should enforce that sequences are ordered by\n            length. Requires True for ONNX support. Defaults to False.\n        kwargs\n            Additional parameters to be passed to SRU when building\n            the rnn.\n        Raises\n        ------\n        ValueError\n            The rnn type should be one of: `lstm`, `gru`, `sru`\n\n        \"\"\"\n        super().__init__()\n\n        self.rnn_type = rnn_type\n        self.input_size = input_size\n        self.hidden_size = hidden_size\n        self.enforce_sorted = enforce_sorted\n
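        # a bidirectional encoder concatenates both directions, so the output width doubles\n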
        self.output_size = 2 * hidden_size if bidirectional else hidden_size\n\n        if rnn_type in [\"lstm\", \"gru\"]:\n            rnn_fn = nn.LSTM if rnn_type == \"lstm\" else nn.GRU\n            self.rnn = rnn_fn(\n                input_size=input_size,\n                hidden_size=hidden_size,\n                num_layers=n_layers,\n                dropout=dropout,\n                bidirectional=bidirectional,\n            )\n        elif rnn_type == \"sru\":\n            from sru import SRU\n\n            try:\n                self.rnn = SRU(\n                    input_size,\n                    hidden_size,\n                    num_layers=n_layers,\n                    dropout=dropout,\n                    bidirectional=bidirectional,\n                    layer_norm=layer_norm,\n                    rescale=rescale,\n                    highway_bias=highway_bias,\n                    **kwargs,\n                )\n            except TypeError:\n                raise ValueError(f\"Unknown kwargs passed to SRU: {kwargs}\")\n        elif rnn_type == \"srupp\":\n            from sru import SRUpp\n\n            try:\n                self.rnn = SRUpp(\n                    input_size,\n                    hidden_size,\n                    hidden_size // 2,\n                    num_layers=n_layers,\n                    highway_bias=highway_bias,\n                    dropout=dropout,\n                    attn_dropout=attn_dropout,\n                    num_heads=attn_heads,\n                    layer_norm=layer_norm,\n                    attn_layer_norm=True,\n                    bidirectional=bidirectional,\n                    **kwargs,\n                )\n            except TypeError:\n                raise ValueError(f\"Unknown kwargs passed to SRU: {kwargs}\")\n        else:\n            raise ValueError(f\"Unknown rnn type: {rnn_type}, use one of: gru, sru, lstm\")\n\n    def forward(\n        self,\n        data: Tensor,\n        state: Optional[Tensor] = None,\n        padding_mask: Optional[Tensor] = None,\n    ) -> Tuple[Tensor, Tensor]:\n        \"\"\"Performs a forward pass through the network.\n        Parameters\n        ----------\n        data : Tensor\n            The input data, as a float tensor of shape [B x S x E]\n        state: Tensor\n            An optional previous state of shape [L x B x H]\n        padding_mask: Tensor, optional\n            The padding mask of shape [B x S], dtype should be bool\n        Returns\n        -------\n        Tensor\n            The encoded output, as a float tensor of shape [B x S x H]\n        Tensor\n            The encoded state, as a float tensor of shape [L x B x H]\n\n        \"\"\"\n        data = data.transpose(0, 1)\n        if padding_mask is not None:\n            padding_mask = padding_mask.transpose(0, 1)\n\n        if padding_mask is None:\n            # Default RNN behavior\n            output, state = self.rnn(data, state)\n        elif self.rnn_type == \"sru\":\n            # SRU takes a mask instead of PackedSequence objects\n            # ~ operator negates bool tensor in torch 1.3\n            output, state = self.rnn(data, state, mask_pad=(~padding_mask))\n        elif self.rnn_type == \"srupp\":\n            # SRU takes a mask instead of PackedSequence objects\n            # ~ operator negates bool tensor in torch 1.3\n            output, state, _ = self.rnn(data, state, mask_pad=(~padding_mask))\n        else:\n            # Deal with variable length sequences\n            lengths = padding_mask.long().sum(dim=0)\n            # Pass through the RNN\n            packed = nn.utils.rnn.pack_padded_sequence(\n                data, lengths.cpu(), enforce_sorted=self.enforce_sorted\n            )\n            output, state = self.rnn(packed, state)\n            output, _ = nn.utils.rnn.pad_packed_sequence(output, total_length=data.size(0))\n\n        # TODO investigate why PyTorch returns type Any for output\n        return output.transpose(0, 1).contiguous(), state  # type: ignore\n\n\nclass SRUppModel(nn.Module):\n\n    def __init__(\n        self,\n        num_aa: int,\n        num_tokens: int,\n        n_layers: int = 1,\n        hidden_dim: int = 256,\n        dropout: float = 0,\n        ab_pad_id: int = 0,\n        virus_pad_id: int = 0,\n        use_srupp: bool = False,\n    ):\n        super().__init__()\n\n        # Virus encoder\n        self.hidden_dim = 
hidden_dim\n self.seq_embedding = nn.Embedding(num_aa, hidden_dim // 4)\n\n # Antibody encoder\n rnn_type = \"srupp\" if use_srupp else \"sru\"\n self.dropout = nn.Dropout(dropout)\n self.rnn_ab = RNNEncoder(\n hidden_dim // 4,\n hidden_dim // 2,\n n_layers=n_layers,\n rnn_type=rnn_type,\n bidirectional=True,\n dropout=dropout,\n )\n self.ab_pad_id = ab_pad_id\n self.virus_pad_id = virus_pad_id\n self.num_tokens = num_tokens\n\n @property\n def output_dim(self):\n return self.hidden_dim\n\n def forward(self, ab_seq):\n # Compute padding mask\n padding_ab = ab_seq != self.ab_pad_id\n\n # Compute token embeddings\n ab_emb = self.dropout(self.seq_embedding(ab_seq))\n\n # Pass through SRUpp layers\n ab_encodings, _ = self.rnn_ab(ab_emb, padding_mask=padding_ab)\n return ab_encodings\n\n\nclass MultiABOnlyCoronavirusModel(pl.LightningModule):\n\n def __init__(\n self,\n num_aa: int,\n num_tokens: int,\n n_layers: int = 1,\n hidden_dim: int = 128,\n dropout: float = 0,\n lr: float = 1e-3,\n ab_pad_id: int = 0,\n virus_pad_id: int = 0,\n neut_lambda: float = 0.5,\n use_srupp: bool = False,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.lr = lr\n self.ab_pad_id = ab_pad_id\n self.virus_pad_id = virus_pad_id\n self.neut_lambda = neut_lambda\n self.dropout = nn.Dropout(dropout)\n self.encoder = SRUppModel( # type: ignore\n num_aa=num_aa,\n num_tokens=num_tokens,\n n_layers=n_layers,\n hidden_dim=hidden_dim,\n dropout=dropout,\n ab_pad_id=ab_pad_id,\n virus_pad_id=virus_pad_id,\n use_srupp=use_srupp,\n )\n encoder_dim = self.encoder.output_dim\n self.fc_neut = nn.Sequential(\n nn.Linear(encoder_dim, hidden_dim // 2),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(hidden_dim // 2, 2),\n )\n #self.neut_auc_sars2 = AUROCWithMask(\n # num_classes=2, average=None, compute_on_step=False\n #)\n #self.neut_auc_sars1 = AUROCWithMask(\n # num_classes=2, average=None, compute_on_step=False\n #)\n\n @classmethod\n def add_extra_args(cls) -> Dict:\n extra_args = {\n \"num_aa\": len(AA_VOCAB),\n \"num_tokens\": 1024,\n \"ab_pad_id\": AA_VOCAB[\"#\"],\n \"virus_pad_id\": AA_VOCAB[\"#\"],\n }\n return extra_args\n\n def average(self, data, padding):\n data = (data * padding.unsqueeze(2)).sum(dim=1)\n padding_sum = padding.sum(dim=1)\n padding_sum[padding_sum == 0] = 1.0\n avg = data / padding_sum.unsqueeze(1)\n return avg\n\n def forward(self, ab_seq):\n padding_mask_ab = (ab_seq != self.ab_pad_id).float()\n ab_encodings = self.encoder(ab_seq)\n output_encoding = self.average(ab_encodings, padding_mask_ab)\n output_encoding = self.dropout(output_encoding)\n neut_logits = self.fc_neut(output_encoding).squeeze(1)\n\n return neut_logits\n\n def configure_callbacks(self):\n return [ModelCheckpoint(monitor=\"auc\", save_top_k=1, mode=\"max\")]\n\n def compute_metrics(self, batch):\n ab_seq = batch[\"ab\"]\n neut_label = batch[\"neut_label\"]\n neut_mask = batch[\"neut_mask\"]\n neut_logits = self(ab_seq)\n neut_loss = F.binary_cross_entropy_with_logits(\n neut_logits, neut_label.float(), reduction=\"none\"\n )\n neut_mask_sum = neut_mask.sum()\n neut_mask_sum = neut_mask_sum if neut_mask_sum > 0 else 1.0\n neut_loss = (neut_loss * neut_mask).sum() / neut_mask_sum\n\n # Final loss\n loss = neut_loss\n\n # Compute metrics (ignore neg label 0)\n self.neut_auc_sars1(\n torch.sigmoid(neut_logits[:, 0]),\n neut_label[:, 0].long(),\n neut_mask[:, 0].bool(),\n )\n self.neut_auc_sars2(\n torch.sigmoid(neut_logits[:, 1]),\n neut_label[:, 1].long(),\n neut_mask[:, 1].bool(),\n )\n return loss\n\n def training_step(self, 
batch, batch_idx):\n loss = self.compute_metrics(batch)\n self.log(\"loss\", loss, prog_bar=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n loss = self.compute_metrics(batch)\n self.log(\"val_loss\", loss, prog_bar=True)\n\n def training_epoch_end(self, output):\n try:\n neut_auc_sars1 = self.neut_auc_sars1.compute()\n except Exception:\n neut_auc_sars1 = 0.5\n\n try:\n neut_auc_sars2 = self.neut_auc_sars2.compute()\n except Exception:\n neut_auc_sars2 = 0.5\n\n self.log(\"train_auc_sars_cov_1\", neut_auc_sars1, prog_bar=False)\n self.log(\"train_auc_sars_cov_2\", neut_auc_sars2, prog_bar=False)\n self.neut_auc_sars1.reset()\n self.neut_auc_sars2.reset()\n\n def validation_epoch_end(self, output):\n try:\n neut_auc_sars1 = self.neut_auc_sars1.compute()\n except Exception as e:\n print(e)\n neut_auc_sars1 = 0.5\n\n try:\n neut_auc_sars2 = self.neut_auc_sars2.compute()\n except Exception:\n neut_auc_sars2 = 0.5\n\n self.log(\"auc\", (neut_auc_sars1 + neut_auc_sars2) / 2, prog_bar=True)\n self.log(\"auc_sars_cov_1\", neut_auc_sars1, prog_bar=True)\n self.log(\"auc_sars_cov_2\", neut_auc_sars2, prog_bar=True)\n self.neut_auc_sars1.reset()\n self.neut_auc_sars2.reset()\n\n def test_step(self, batch, batch_idx):\n return self.validation_step(batch, batch_idx)\n\n def test_epoch_end(self, output):\n return self.validation_epoch_end(output)\n\n def configure_optimizers(self):\n return RAdam((p for p in self.parameters() if p.requires_grad), lr=self.lr)\n","repo_name":"wengong-jin/RefineGNN","sub_path":"neut_model.py","file_name":"neut_model.py","file_ext":"py","file_size_in_byte":13964,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"81"} +{"seq_id":"42705266567","text":"import argparse\nimport textwrap\n\nfrom cliff import sphinxext\nfrom cliff.tests import base\n\n\nclass TestSphinxExtension(base.TestBase):\n\n def test_empty_help(self):\n \"\"\"Handle positional and optional actions without help messages.\"\"\"\n parser = argparse.ArgumentParser(prog='hello-world', add_help=False)\n parser.add_argument('name', action='store')\n parser.add_argument('--language', dest='lang')\n\n output = '\\n'.join(sphinxext._format_parser(parser))\n self.assertEqual(textwrap.dedent(\"\"\"\n .. program:: hello-world\n .. code-block:: shell\n\n hello-world [--language LANG] name\n\n .. option:: --language \n\n .. option:: name\n \"\"\").lstrip(), output)\n\n def test_nonempty_help(self):\n \"\"\"Handle positional and optional actions with help messages.\"\"\"\n parser = argparse.ArgumentParser(prog='hello-world', add_help=False)\n parser.add_argument('name', help='user name')\n parser.add_argument('--language', dest='lang',\n help='greeting language')\n\n output = '\\n'.join(sphinxext._format_parser(parser))\n self.assertEqual(textwrap.dedent(\"\"\"\n .. program:: hello-world\n .. code-block:: shell\n\n hello-world [--language LANG] name\n\n .. option:: --language \n\n greeting language\n\n .. option:: name\n\n user name\n \"\"\").lstrip(), output)\n\n def test_description_epilog(self):\n \"\"\"Handle a parser description, epilog.\"\"\"\n parser = argparse.ArgumentParser(prog='hello-world', add_help=False,\n description='A \"Hello, World\" app.',\n epilog='What am I doing down here?')\n parser.add_argument('name', action='store')\n parser.add_argument('--language', dest='lang')\n\n output = '\\n'.join(sphinxext._format_parser(parser))\n self.assertEqual(textwrap.dedent(\"\"\"\n A \"Hello, World\" app.\n\n .. program:: hello-world\n .. 
code-block:: shell\n\n hello-world [--language LANG] name\n\n .. option:: --language \n\n .. option:: name\n\n What am I doing down here?\n \"\"\").lstrip(), output)\n\n def test_flag(self):\n \"\"\"Handle a boolean argparse action.\"\"\"\n parser = argparse.ArgumentParser(prog='hello-world', add_help=False)\n parser.add_argument('name', help='user name')\n parser.add_argument('--translate', action='store_true',\n help='translate to local language')\n\n output = '\\n'.join(sphinxext._format_parser(parser))\n self.assertEqual(textwrap.dedent(\"\"\"\n .. program:: hello-world\n .. code-block:: shell\n\n hello-world [--translate] name\n\n .. option:: --translate\n\n translate to local language\n\n .. option:: name\n\n user name\n \"\"\").lstrip(), output)\n\n def test_supressed(self):\n \"\"\"Handle a supressed action.\"\"\"\n parser = argparse.ArgumentParser(prog='hello-world', add_help=False)\n parser.add_argument('name', help='user name')\n parser.add_argument('--variable', help=argparse.SUPPRESS)\n\n output = '\\n'.join(sphinxext._format_parser(parser))\n self.assertEqual(textwrap.dedent(\"\"\"\n .. program:: hello-world\n .. code-block:: shell\n\n hello-world name\n\n\n .. option:: name\n\n user name\n \"\"\").lstrip(), output)\n\n def test_metavar(self):\n \"\"\"Handle an option with a metavar.\"\"\"\n parser = argparse.ArgumentParser(prog='hello-world', add_help=False)\n parser.add_argument('names', metavar='', nargs='+',\n help='a user name')\n\n output = '\\n'.join(sphinxext._format_parser(parser))\n self.assertEqual(textwrap.dedent(\"\"\"\n .. program:: hello-world\n .. code-block:: shell\n\n hello-world [ ...]\n\n .. option:: NAME\n\n a user name\n \"\"\").lstrip(), output)\n\n def test_multiple_opts(self):\n \"\"\"Correctly output multiple opts on separate lines.\"\"\"\n parser = argparse.ArgumentParser(prog='hello-world', add_help=False)\n parser.add_argument('name', help='user name')\n parser.add_argument('--language', dest='lang',\n help='greeting language')\n parser.add_argument('--translate', action='store_true',\n help='translate to local language')\n parser.add_argument('--write-to-var-log-something-or-other',\n action='store_true',\n help='a long opt to force wrapping')\n parser.add_argument('--required-arg', dest='stuff', required=True,\n help='a required argument')\n style_group = parser.add_mutually_exclusive_group(required=True)\n style_group.add_argument('--polite', action='store_true',\n help='use a polite greeting')\n style_group.add_argument('--profane', action='store_true',\n help='use a less polite greeting')\n\n output = '\\n'.join(sphinxext._format_parser(parser))\n self.assertEqual(textwrap.dedent(\"\"\"\n .. program:: hello-world\n .. code-block:: shell\n\n hello-world\n [--language LANG]\n [--translate]\n [--write-to-var-log-something-or-other]\n --required-arg STUFF\n (--polite | --profane)\n name\n\n .. option:: --language \n\n greeting language\n\n .. option:: --translate\n\n translate to local language\n\n .. option:: --write-to-var-log-something-or-other\n\n a long opt to force wrapping\n\n .. option:: --required-arg \n\n a required argument\n\n .. option:: --polite\n\n use a polite greeting\n\n .. option:: --profane\n\n use a less polite greeting\n\n .. 
option:: name\n\n user name\n \"\"\").lstrip(), output)\n\n def test_various_option_names_with_hyphen(self):\n \"\"\"Handle options whose name and/or metavar contain hyphen(s)\"\"\"\n parser = argparse.ArgumentParser(prog='hello-world', add_help=False)\n parser.add_argument('--foo-bar', metavar='',\n help='foo bar', required=True)\n parser.add_argument('--foo-bar-baz', metavar='',\n help='foo bar baz', required=True)\n parser.add_argument('--foo', metavar='',\n help='foo', required=True)\n parser.add_argument('--alpha', metavar='
    ',\n help='alpha')\n parser.add_argument('--alpha-beta', metavar='',\n help='alpha beta')\n parser.add_argument('--alpha-beta-gamma', metavar='',\n help='alpha beta gamma')\n\n output = '\\n'.join(sphinxext._format_parser(parser))\n self.assertEqual(textwrap.dedent(\"\"\"\n .. program:: hello-world\n .. code-block:: shell\n\n hello-world\n --foo-bar \n --foo-bar-baz \n --foo \n [--alpha ]\n [--alpha-beta ]\n [--alpha-beta-gamma ]\n\n .. option:: --foo-bar \n\n foo bar\n\n .. option:: --foo-bar-baz \n\n foo bar baz\n\n .. option:: --foo \n\n foo\n\n .. option:: --alpha \n\n alpha\n\n .. option:: --alpha-beta \n\n alpha beta\n\n .. option:: --alpha-beta-gamma \n\n alpha beta gamma\n \"\"\").lstrip(), output)\n","repo_name":"openstack/cliff","sub_path":"cliff/tests/test_sphinxext.py","file_name":"test_sphinxext.py","file_ext":"py","file_size_in_byte":7870,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"81"} +{"seq_id":"72921002824","text":"from django import forms\n\n\n\nfrom billings.models import Billing\n\n\nclass BillingForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(BillingForm, self).__init__(*args, **kwargs)\n\n self.fields['full_name'].widget.attrs.update(\n {\n 'placeholder': 'Enter First Name',\n }\n )\n\n for field in self.fields:\n self.fields[field].required = True\n\n # self.fields['password1'].error_messages.update({\n # 'required': 'Password is required'\n # })\n\n\n class Meta:\n model = Billing\n # fields = ['email', 'username', 'first_name',\n # 'last_name', 'password1', 'password2', ]\n\n exclude = ('user', 'timestamp', 'products') \n\n\n\n\n\n","repo_name":"Sany07/EdTech-Platform-Django-SSlcommerz","sub_path":"carts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"32944975644","text":"import os.path\r\nfrom sqlalchemy import update\r\nfrom flask import Flask\r\nimport sqlalchemy\r\nfrom flask import render_template\r\nfrom flask import request, redirect\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom sqlalchemy import ForeignKey\r\napp = Flask(__name__, template_folder=\"templates\")\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.sqlite3'\r\ndb = SQLAlchemy()\r\ndb.init_app(app)\r\napp.app_context().push()\r\n\r\nclass Student(db.Model):\r\n __tablename__ = \"student\"\r\n student_id = db.Column(db.Integer, primary_key=True, autoincrement=True)\r\n roll_number = db.Column(db.String, unique=True, nullable=False)\r\n first_name =db.Column(db.String, nullable=False)\r\n last_name =db.Column(db.String)\r\n\r\nclass Course(db.Model):\r\n __tablename__ = \"course\"\r\n course_id = db.Column(db.Integer, primary_key = True, autoincrement = True)\r\n course_code = db.Column(db.String, unique=True, nullable=False)\r\n course_name = db.Column(db.String, nullable=False)\r\n course_description=db.Column(db.String)\r\n\r\nclass Enrollments(db.Model):\r\n __tablename__ = \"enrollments\"\r\n enrollment_id = db.Column(db.Integer, primary_key = True, autoincrement = True)\r\n estudent_id = db.Column(db.Integer, db.ForeignKey(\"student.student_id\"), nullable=False)\r\n ecourse_id =db.Column(db.Integer, db.ForeignKey(\"course.course_id\"), nullable=False)\r\n\r\n@app.route(\"/\", methods= [\"GET\",\"POST\"])\r\ndef All_student():\r\n students = Student.query.all()\r\n return render_template(\"all_students.html\", 
students=students)\r\n\r\n@app.route(\"/student/create\", methods= [\"GET\"])\r\ndef create_student_get():\r\n return render_template(\"add_student.html\")\r\n\r\n@app.route(\"/student/create\", methods= [\"POST\"])\r\ndef create_student_post():\r\n try:\r\n form = request.form\r\n std_roll_no = form.get(\"roll\")\r\n std_fname = form.get(\"f_name\")\r\n std_lname = form.get(\"l_name\")\r\n student = Student(roll_number=std_roll_no, first_name=std_fname, last_name=std_lname)\r\n db.session.add(student)\r\n db.session.flush()\r\n sid = student.student_id\r\n course_list = form.getlist(\"courses\")\r\n course_dict = {\"course_1\": 1, \"course_2\": 2, \"course_3\": 3, \"course_4\": 4}\r\n for course in course_list:\r\n enrollment = Enrollments(estudent_id=sid, ecourse_id=course_dict[course])\r\n db.session.add(enrollment)\r\n db.session.flush()\r\n db.session.commit()\r\n except sqlalchemy.exc.IntegrityError as e:\r\n print(\"Duplicate\")\r\n return render_template(\"duplicate_rollnumber.html\")\r\n except Exception as e:\r\n print(\"Error\", e)\r\n return render_template(\"error.html\")\r\n return redirect(\"/\")\r\n@app.route('/student/', methods=['GET'])\r\ndef students_details_get(student_id):\r\n student = Student.query.filter(Student.student_id == student_id).first()\r\n enrollments = Enrollments.query.filter(Enrollments.estudent_id == student_id).all()\r\n course_ids = [i.ecourse_id for i in enrollments]\r\n course_list = []\r\n for index, cid in enumerate(course_ids):\r\n course = Course.query.filter(Course.course_id == cid).first()\r\n course_list.append([index + 1, course.course_code, course.course_name, course.course_description])\r\n return render_template(\"personal.html\", students=student, courses=course_list)\r\n\r\n\r\n@app.route(\"/student//delete\", methods=[\"GET\"])\r\ndef delete_students_get(student_id):\r\n Student.query.filter(Student.student_id==student_id).delete()\r\n db.session.commit()\r\n Enrollments.query.filter(Enrollments.estudent_id==student_id).delete()\r\n db.session.commit()\r\n return redirect(\"/\")\r\n \r\n@app.route('/student//update', methods=['GET', 'POST'])\r\ndef update(stud_id):\r\n if request.method == 'GET':\r\n Stu = Student.query.get(stud_id)\r\n return render_template('update.html', student=Stu)\r\n\r\n elif request.method == 'POST':\r\n first_name = request.form['f_name']\r\n print(first_name)\r\n last_name = request.form['l_name']\r\n \r\n Stu = Student.query.get(stud_id)\r\n Stu.first_name = first_name\r\n Stu.last_name = last_name\r\n \r\n\r\n Enrollments.query.filter_by(estudent_id=stud_id).delete()\r\n db.session.flush()\r\n courses = request.form.getlist('courses')\r\n for course in courses:\r\n enroll = Enrollments(estudent_id=stud_id,\r\n ecourse_id=int(course[-1]))\r\n db.session.add(enroll)\r\n\r\n db.session.commit()\r\n return redirect('/')\r\n\r\n\r\nif __name__== \"__main__\":\r\n app.run(host= '0.0.0.0',\r\n debug = True,\r\n port = 8080)\r\n ","repo_name":"startingpyth/MAD_assignment","sub_path":"week5_graded.py","file_name":"week5_graded.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28103758434","text":"#-*-coding:utf-8-*-\nfrom __future__ import print_function\nimport codecs\nfrom util.logger import logger\nfrom util.logger import lg\nimport re\nimport json\nimport Levenshtein\n\n# f = open(\"thread.txt\",\"r\",encoding=\"utf-8\")\n\n# # 没用\n# # content=f.read().decode(\"utf-8\").encode(\"utf-8\")\n# # 
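# Illustrative, self-contained sketch (not from the original script) of the
# encoding-tolerant read that the commented-out decode()/encode() experiments
# above are circling: open the file once with an explicit encoding and let
# errors="ignore" drop undecodable bytes instead of raising.
import codecs

def read_log_lines(path="thread.txt", encoding="utf-8"):
    # Returns the log lines with their trailing newlines stripped.
    with codecs.open(path, "r", encoding=encoding, errors="ignore") as f:
        return [line.rstrip("\r\n") for line in f]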
logger.info(content)\n\n# # line = f.readline()\n# line = f.readline().decode(\"utf-8\", errors=\"ignore\").encode(\"utf-8\")\n# # line = f.readline().decode(\"gbk\", errors=\"ignore\").encode(\"utf-8\")\n\n# for i in range(0,20):\n# # while line:\n# l = line\n# l = line.decode(\"utf-8\").encode(\"utf-8\")\n# # l = line.decode(\"gb2312\").encode(\"gb2312\")\n# # logger.info(line)\n \n# # print(l)\n# # logger.info(l)\n\n# # line = f.readline()\n# line = f.readline().decode(\"utf-8\", errors=\"ignore\").encode(\"utf-8\")\n# # line = f.readline().decode(\"gbk\", errors=\"ignore\").encode(\"utf-8\")\n\n# f.close()\n\n\n\n#解析日志生成json\n# regThreadId = r\"(?<=id = ).*$\"\n# regThreadName = r\"(?<=name = ).*$\"\n\n# res=[]\n# # oneTimeData = {\"time\":\"\",\"threads\":[]}\n# threads=[]\n# thread = {\"id\":\"\",\"name\":\"\"}\n# with codecs.open(\"thread.txt\",\"r\",encoding=\"utf-8\") as f:\n# cnt = 0\n# for line in f:\n# logger.info(line.strip())\n\n# if \"test log\" in line:\n# # oneTimeData = {\"time\":line[:14],\"threads\":[]}\n# # res.append(oneTimeData)\n\n# threads = []\n# res.append({\"time\":line[:14],\"threads\":threads})\n\n# if \"name = \" in line:\n# thread = {\"id\":\"\",\"name\":\"\"}\n# threads.append(thread)\n# thread[\"name\"] = re.search(regThreadName,line.strip(),flags=0).group()\n# continue\n\n# if \"id = \" in line:\n# thread[\"id\"] = re.search(regThreadId,line.strip(),flags=0).group()\n# continue\n\n# cnt=cnt+1\n# # if cnt>100:\n# # break\n\n# logger.info(res)\n#解析日志生成json\n\n\n#解析json文件\n# f = open(\"thread_analyzed.json\")\n# j = f.read()\n\n# # logger.info(j)\n\n# p = json.loads(j)\n\n# # logger.info(p)\n\n# for k1,v1 in p.items():\n# # logger.info(\"k1\")\n# # logger.info(k1)\n# # logger.info(\"v1\")\n# # logger.info(v1)\n\n# for v2 in v1:\n# logger.info(v2[\"time\"])\n# logger.info(\"threads\")\n# for v3 in v2[\"threads\"]:\n# for k4,v4 in v3.items():\n# logger.info(\"%s : %s\"%(k4,v4))\n\n# f.close()\n#解析json文件\n\n#解析json+计算相似度\n\n\n#jaro = Levenshtein.jaro(str1,str2)\n#Levenshtein.median([str1,str2,str3,str4,str5,str6,str7,str8])\n\n\nf = open(\"thread_analyzed.json\")\nj = f.read()\n\np = json.loads(j)\n\nf.close()\n\nlastGroups = {}\n\nfor key,data in p.items():\n # logger.info(\"k1\")\n # logger.info(k1)\n # logger.info(\"v1\")\n # logger.info(v1)\n\n for otRes in data:\n lg(u\"========================单次上报检测start========================\")\n logger.info(otRes[\"time\"])\n\n # logger.info(\"threads\")\n # for thread in otRes[\"threads\"]:\n # logger.info(\"%s : %s\"%(thread[\"id\"],thread[\"name\"]))\n\n # i=0\n # j=0\n # for i in range(len(otRes[\"threads\"])-1):\n # for j in range(i+1,len(otRes[\"threads\"])):\n # jaro = Levenshtein.jaro(otRes[\"threads\"][i],otRes[\"threads\"][j])\n # logger.info(\"对比:%s - %s - 相似度:%s\"%(otRes[\"threads\"][i],otRes[\"threads\"][j],jaro))\n\n # if jaro > 0.85:\n\n i = 0\n threads = otRes[\"threads\"]\n groups = {}\n while i < len(threads):\n group = []\n strGroup = []#用于快速统计字符串公共部分\n # lg(u\"对比 - %s\"%threads[i][\"name\"])\n j = i+1\n while j < len(threads):\n # base = threads[i][\"name\"]\n jaro = Levenshtein.jaro(threads[i][\"name\"],threads[j][\"name\"])\n # logger.info(u\"对比:%s - %s - 相似度:%s\"%(threads[i][\"name\"],threads[j][\"name\"],jaro))\n if jaro > 0.85:\n if len(group) == 0:\n # lg(u\"第一次,两个值都加入group\")\n group.append({\"name\":threads[i][\"name\"],\"id\":threads[i][\"id\"]})\n strGroup.append(threads[i][\"name\"])\n group.append({\"name\":threads[j][\"name\"],\"id\":threads[j][\"id\"]})\n 
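# Illustrative, self-contained demo (made-up thread names) of the two
# python-Levenshtein calls this grouping loop relies on: jaro() gives a 0..1
# similarity score, quickmedian() picks a representative string for a group.
import Levenshtein

names = ["Thread-pool-1", "Thread-pool-2", "Thread-pool-3"]
print(Levenshtein.jaro(names[0], names[1]))  # close to 1.0 for near-duplicates
print(Levenshtein.jaro(names[0], "GC"))      # much lower for unrelated names
print(Levenshtein.quickmedian(names))        # a "median" name usable as a group key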
strGroup.append(threads[j][\"name\"])\n del(threads[j])\n else:\n j=j+1#光标控制\n if len(group) == 0:\n # lg(u\"group为空,下一次\")\n i=i+1#光标控制\n else:\n del(threads[i])\n # lg(u\"当前\")\n # lg(group)\n # lg(u\"共通分析:%s\"%Levenshtein.median(strGroup))\n # lg(u\"共通分析:%s\"%Levenshtein.setmedian(strGroup))\n\n # lg(u\"共通分析:%s\"%Levenshtein.quickmedian(strGroup))\n key = Levenshtein.quickmedian(strGroup)\n # groups.append({key:group})\n groups[key] = group\n\n \n lg(u\"最终结果\")\n lg(groups)\n lg(u\"========================单次上报检测end========================\")\n\n if len(lastGroups) > 0:\n for k,group in groups.items():\n if lastGroups.has_key(k):\n lg(\"key = %s\"%k)\n if len(lastGroups[k]) != len(group):\n lg(u\"有变化\")\n lg(lastGroups[k])\n lg(groups[k])\n # else:\n # lg(u\"没有变化\")\n lastGroups = groups\n\n\n\n # lastGroups = groups\n\n\n\n\n\n#解析json+计算相似度","repo_name":"sephirothz87/PyUtil","sub_path":"T_ThreadData.py","file_name":"T_ThreadData.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15059910780","text":"###################################################\n# This file was developed by ToanLK\n# It is released under BSD, MIT and GPL2 licenses\n# Version 0.1 Date: 22/02/2012\n# Version 0.2 Date: 20/01/2014\n###################################################\nfrom gluon import current, HTTP\nfrom html import *\nfrom gluon.dal import Field\nfrom validators import IS_EMPTY_OR, IS_NOT_EMPTY, IS_IMAGE, IS_NULL_OR, IS_IN_DB, IS_IN_SET, IS_NOT_IN_DB\nimport os\n\n\n#####################################################################\n## BOX TABLE\n\n\t\n\nclass Page:\n\tdef __init__(self,**attr):\n\t\tself.object = attr.get('object',None)\n\t\ttry:\n\t\t\tself.db = self.object.db\n\t\t\tself.auth = self.object.auth\n\t\t\tself.div_id = self.object.div_id\n\t\t\tself.folder = self.object.folder.id\n\t\t\tself.vars = current.request.vars\n\t\texcept:\n\t\t\tself.db = current.globalenv['db']\n\t\t\tself.auth = current.globalenv['auth']\n\t\t\tself.div_id = ''\n\t\t\tself.folder = current.request.vars.folder_id\n\t\t\tself.vars = None\t\t\n\t\t\t\n\t\t#self.box = define_box(self.db,self.auth) \n\t\t#self.fbox = define_folder_box(self.db,self.auth) \n\n\tdef define_page(self,migrate=False):\t\n\t\tdb = self.db\n\t\tif 'page' in db.tables: return db.page\n\t\treturn db.define_table('page',\n\t\t\tField('name',unique=True,requires=IS_NOT_EMPTY()),\n\t\t\tField('description','text',default=''),\n\t\t\tField('content','text',default=''),\n\t\t\tmigrate=migrate)\t\t\t\t\t\n\t\t\t\n\tdef show(self,name,context={}):\n\t\ttry:\n\t\t\timport cStringIO\n\t\t\tbox = self.box(name) if isinstance(name,int) else self.db(self.box.name==name).select().first() \n\t\t\tif not box: return 'Box %s not exist'%name\n\t\t\tcontent = box.content\n\t\t\tcontent = content.replace('"', \"'\")\n\t\t\tcontent = content.replace(''', '\"')\t\n\t\t\tcontent = '
    %s %s
    '%(box.id,box.name,box.data,content)\n\t\t\ttry:\n\t\t\t\tif 'header' not in context.keys(): context['header'] = H3(current.T(box.name))\n\t\t\t\tsettings = eval(box.setting.replace(chr(13),''))\n\t\t\t\tfor key in settings.keys(): context[key] = settings[key]\n\t\t\texcept: pass\n\t\t\tcontent = current.response.render(cStringIO.StringIO(content), dict(context=context))\n\t\t\treturn XML(content)\n\t\texcept:\n\t\t\treturn 'Box %s error: %s'%(box.name,box.content)\n\t\t\n\tdef render_box(self,box,context={}):\n\t\ttry:\n\t\t\timport cStringIO\n\t\t\tcontent = box.content\n\t\t\tcontent = content.replace('"', \"'\")\n\t\t\tcontent = content.replace(''', '\"')\t\n\t\t\tcontent = '
    %s %s
    '%(box.id,box.name,box.data,content)\n\t\t\ttry:\n\t\t\t\tif 'header' not in context.keys(): context['header'] = H3(current.T(box.name))\n\t\t\t\tsettings = eval(box.setting.replace(chr(13),''))\n\t\t\t\tfor key in settings.keys(): context[key] = settings[key]\n\t\t\texcept: pass\n\t\t\tcontent = current.response.render(cStringIO.StringIO(content), dict(context=context))\n\t\t\treturn XML(content)\n\t\texcept:\n\t\t\treturn 'Box %s error: %s'%(box.name,box.content)\n\n\tdef render(self,div='left',type=None):\n\t\tlinks = ''\n\t\tdb = self.db\n\t\tif not type: \n\t\t\tif self.object:\n\t\t\t\ttype='content' if self.object.id else 'folder'\n\t\t\telif self.folder: type = 'folder'\n\t\t\telse: type = 'link'\n\t\tquery = (db.folder_box.active==True)&(db.folder_box.divid==div)&(db.folder_box.folder==self.folder)&(db.box.id==db.folder_box.box)\n\t\trows = db(query&(db.folder_box.type==type)).select(db.folder_box.box,db.folder_box.position,orderby=db.folder_box.position,distinct=True)\n\t\tfor row in rows: links += str(self.render_box(db.box(row.box),{}))\n\t\tif (type=='folder')&(links!=''):\n\t\t\tposition = rows[0].position \n\t\t\trows = db(query&(db.folder_box.position<=position)&(db.folder_box.type=='link')).select(db.box.id,orderby=db.folder_box.position,distinct=True)\n\t\t\tfor row in rows: links = str(self.render_box(db.box(row.id))) + links \n\t\t\trows = db(query&(db.folder_box.position>position)&(db.folder_box.type=='link')).select(db.box.id,orderby=db.folder_box.position,distinct=True)\n\t\t\tfor row in rows: links = links + str(self.render_box(db.box(row.id))) \n\t\treturn XML(links)\n\t\t","repo_name":"dangquocdung/sgd","sub_path":"modules/plugin_page.py","file_name":"plugin_page.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20002371349","text":"#! /usr/bin/env python\nimport time\nimport copy\nfrom collections import OrderedDict\nfrom .refine import equal\n\n \ndef compare_rxns(rxn1, rxn2):\n \"\"\"\n Checking to see whether two reaction pathways are the same.\n \n parameters\n ----------\n rxn1, rxn2: [Molecule objects]\n Molecule objects reactant, TS, and product in a list.\n \n Return\n ----------\n \"True\" if they are identical. 
\n \"False\" if they are different.\n \"\"\"\n L1 = len(rxn1)\n L2 = len(rxn2)\n rxns = {L1: rxn1, L2: rxn2}\n\n if L1 == L2:\n equal_count = 0\n for M1, M2 in zip(rxn1, rxn2):\n if equal(M1, M2):\n equal_count += 1\n\n if equal_count == L1:\n return True\n\n equal_count = 0\n for M1, M2 in zip(rxn1[::-1], rxn2):\n if equal(M1, M2):\n equal_count += 1\n\n if equal_count == L1:\n return True\n else:\n shorter = rxns[min(rxns)]\n longer = rxns[max(rxns)]\n ite_num = int(abs(L1/3-L2/3) + 1)\n equal_count = 0\n for i in range(ite_num):\n for M1, M2 in zip(shorter, longer[i*3:]):\n if equal(M1, M2):\n equal_count += 1\n if equal_count == min(L1, L2):\n return True\n\n for M1, M2 in zip(shorter, longer[i*3:][::-1]):\n if equal(M1, M2):\n equal_count += 1\n if equal_count == min(L1, L2):\n return True\n\n return False\n\ndef check_repeat(M):\n if len(M) <= 6:\n return False\n\n for i in range(len(M)):\n if i%3 == 0:\n for j in range(int(len(M)/3)): \n if j*3>i:\n if equal(M[i],M[j*3]) or equal(M[i], M[-1]):\n return True\n return False\n \n\n\ndef filterTS(M_info, E1):\n temp = copy.deepcopy(M_info)\n for k, v in M_info.items():\n num = int(len(v)/3)\n Final_E = float(v[-1].qm_energies[0])\n if Final_E > E1:\n continue\n for i in range(num):\n Rct_E = float(v[i*3].qm_energies[0])\n\n TS_E = float(v[i*3+1].qm_energies[0])\n \n Prd_E = float(v[i*3+2].qm_energies[0])\n\n if TS_E > Rct_E and TS_E > Prd_E:\n if TS_E > E1:\n del temp[k]\n break \n else:\n print(\"Transition state has lower energy than reactant/product.\")\n\n return temp\n\ndef connect_rxns(M_info, outsiders = None, iteration=0):\n \"\"\"\n This function will connect unit reactions.\n \n Parameters\n ----------\n M_info : OrderedDict\n OrderedDict with frames in key and Molcule objects in a list [reactant, TS, product]\n \n Return\n ----------\n final : OrderedDict\n Molecule objects consist of unit reactions \n \"\"\"\n print(\"-----------Iteration %i-----------\"%iteration)\n rxns = OrderedDict()\n if outsiders is None:\n outsiders = OrderedDict()\n outsiders_temp = copy.deepcopy(M_info)\n filtered = copy.deepcopy(M_info)\n if iteration == 0:\n print(\"Filtering identical reaction pathways..\")\n for i, (k1, v1) in enumerate(M_info.items()):\n for j, (k2, v2) in enumerate(M_info.items()):\n if j > i:\n if compare_rxns(v1, v2):\n if len(v1) >= len(v2) and k2 in filtered.keys():\n del filtered[k2]\n elif len(v1) < len(v2) and k1 in filtered.keys():\n del filtered[k1]\n\n\n filtered_ratio=1-len(filtered)/len(M_info)\n print(\"{:.2f}% of unit reactions were filtered ({i} unit reactions are unique out of {i})\".format(filtered_ratio*100, len(filtered), len(M_info)))\n\n connect = 0\n print('Detecting connection points..')\n for i, (k1, v1) in enumerate(filtered.items()):\n for j, (k2, v2) in enumerate(filtered.items()):\n if j > i:\n reac1 = v1[0]\n prod1 = v1[-1]\n reac2 = v2[0]\n prod2 = v2[-1]\n frm = k1 +\"/\"+ k2\n if equal(reac1, reac2):\n M = v2[::-1] + v1\n if check_repeat(M):\n continue\n else:\n try:\n del outsiders_temp[k1], outsiders_temp[k2]\n except:\n pass\n rxns[frm]=M\n connect += 1\n\n elif equal(reac1, prod2):\n M = v2 + v1\n if check_repeat(M):\n continue\n else:\n try:\n del outsiders_temp[k1], outsiders_temp[k2]\n except:\n pass\n rxns[frm] = M\n connect += 1\n\n elif equal(prod1, reac2):\n M = v1 + v2\n if check_repeat(M):\n continue\n else:\n try:\n del outsiders_temp[k1], outsiders_temp[k2]\n except:\n pass\n rxns[frm] = M\n connect += 1\n\n elif equal(prod1, prod2):\n M = v1 + v2[::-1]\n if check_repeat(M):\n 
continue\n else:\n try:\n del outsiders_temp[k1], outsiders_temp[k2]\n except:\n pass\n rxns[frm] = M\n connect += 1\n outsiders.update(outsiders_temp)\n print(\"Connections\", connect)\n print(\"Number of connected rxns\", len(rxns))\n print(\"Number of outsiders\", len(outsiders))\n\n if connect == 0:\n print(\"Final filtering of the outsiders\")\n filtered = copy.deepcopy(outsiders)\n for i, (k1, v1) in enumerate(outsiders.items()):\n for j, (k2, v2) in enumerate(outsiders.items()):\n if j > i:\n if compare_rxns(v1, v2):\n if len(v1) >= len(v2) and k2 in filtered.keys():\n del filtered[k2]\n elif len(v1) < len(v2) and k1 in filtered.keys():\n del filtered[k1]\n\n print('Done! %i reactions were filtered from %i reactions.' %(len(filtered), len(outsiders)))\n return filtered#final\n else:\n iteration += 1\n return connect_rxns(rxns, outsiders, iteration)\n\n\nif __name__=='__main__':\n pass \n\n\n","repo_name":"hjnpark/QCARWorkflow","sub_path":"qcarw/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15879168936","text":"from math import ceil\n\nfrom odoo import _, api, fields, models\nfrom odoo.exceptions import ValidationError\nfrom odoo.osv import expression\n\n\nclass ProductMRPArea(models.Model):\n _name = \"product.mrp.area\"\n _description = \"Zone de fabrication produit\"\n\n active = fields.Boolean(default=True)\n mrp_area_id = fields.Many2one(comodel_name=\"mrp.area\", required=True)\n company_id = fields.Many2one(\n comodel_name=\"res.company\",\n related=\"mrp_area_id.warehouse_id.company_id\",\n store=True,\n )\n product_id = fields.Many2one(\n comodel_name=\"product.product\", required=True, string=\"Produit\"\n )\n product_tmpl_id = fields.Many2one(\n comodel_name=\"product.template\",\n readonly=True,\n related=\"product_id.product_tmpl_id\",\n store=True,\n )\n location_id = fields.Many2one(related=\"mrp_area_id.location_id\")\n location_proc_id = fields.Many2one(\n string=\"Procure Location\",\n comodel_name=\"stock.location\",\n domain=\"[('location_id', 'child_of', location_id)]\",\n help=\"Définissez ceci si vous devez vous approvisionner à partir d'un autre emplacement\"\n \"que l'emplacement de la zone.\",\n )\n # TODO: applicable and exclude... redundant??\n mrp_applicable = fields.Boolean(string=\"Ordre de fabrication applicable\")\n mrp_exclude = fields.Boolean(string=\"Exclure de l'ordre de fabrication\")\n mrp_inspection_delay = fields.Integer(string=\"Délai d'inspection\")\n mrp_maximum_order_qty = fields.Float(string=\"Quantité maximale de commande\", default=0.0)\n mrp_minimum_order_qty = fields.Float(string=\"Quantité minimum de commande\", default=0.0)\n mrp_minimum_stock = fields.Float(string=\"Stock de Sécurité\")\n mrp_nbr_days = fields.Integer(\n string=\"Nb. 
Jours\",\n default=0,\n help=\"Nombre de jours pour regrouper la demande de ce produit pendant la\"\n \"Exécution MRP, afin de déterminer la quantité à commander.\",\n )\n mrp_qty_multiple = fields.Float(string=\"Qty Multiple\", default=1.00)\n mrp_transit_delay = fields.Integer(string=\"Transit Delay\", default=0)\n mrp_verified = fields.Boolean(\n string=\"Vérifié pour la fabrication\",\n help=\"Identifie que ce produit a été vérifié \"\n \"être valable pour la fabrication.\",\n )\n mrp_lead_time = fields.Float(string=\"Délai de mise en œuvre\", compute=\"_compute_mrp_lead_time\")\n distribution_lead_time = fields.Float()\n main_supplier_id = fields.Many2one(\n comodel_name=\"res.partner\",\n string=\"Fournisseur principal\",\n compute=\"_compute_main_supplier\",\n store=True,\n index=True,\n )\n main_supplierinfo_id = fields.Many2one(\n comodel_name=\"product.supplierinfo\",\n string=\"Informations sur le fournisseur\",\n compute=\"_compute_main_supplier\",\n store=True,\n )\n supply_method = fields.Selection(\n selection=[\n (\"buy\", \"Achat\"),\n (\"none\", \"Non défini\"),\n (\"manufacture\", \"Fabrication\"),\n (\"phantom\", \"Ensemble d'Outils\"),\n (\"pull\", \"Stratégie pull \"),\n (\"push\", \"Stratégie push\"),\n (\"pull_push\", \"Stratégie push et pull \"),\n ],\n compute=\"_compute_supply_method\",\n )\n qty_available = fields.Float(\n string=\"Quantité disponible\", compute=\"_compute_qty_available\"\n )\n mrp_move_ids = fields.One2many(\n comodel_name=\"mrp.move\", inverse_name=\"product_mrp_area_id\", readonly=True\n )\n planned_order_ids = fields.One2many(\n comodel_name=\"mrp.planned.order\",\n inverse_name=\"product_mrp_area_id\",\n readonly=True,\n )\n mrp_planner_id = fields.Many2one(\"res.users\")\n\n _sql_constraints = [\n (\n \"product_mrp_area_uniq\",\n \"unique(product_id, mrp_area_id)\",\n \"La combinaison produit/paramètres de la zone MRP doit être unique.\",\n )\n ]\n\n @api.constrains(\n \"mrp_minimum_order_qty\",\n \"mrp_maximum_order_qty\",\n \"mrp_qty_multiple\",\n \"mrp_minimum_stock\",\n \"mrp_nbr_days\",\n )\n def _check_negatives(self):\n values = self.read(\n [\n \"mrp_minimum_order_qty\",\n \"mrp_maximum_order_qty\",\n \"mrp_qty_multiple\",\n \"mrp_minimum_stock\",\n \"mrp_nbr_days\",\n ]\n )\n for rec in values:\n if any(v < 0 for v in rec.values()):\n raise ValidationError(_(\"Vous ne pouvez pas utiliser un nombre négatif.\"))\n\n def name_get(self):\n return [\n (\n area.id,\n \"[{}] {}\".format(area.mrp_area_id.name, area.product_id.display_name),\n )\n for area in self\n ]\n\n @api.model\n def _name_search(\n self, name, args=None, operator=\"ilike\", limit=100, name_get_uid=None\n ):\n if operator in (\"ilike\", \"like\", \"=\", \"=like\", \"=ilike\"):\n args = expression.AND(\n [\n args or [],\n [\n \"|\",\n \"|\",\n (\"product_id.name\", operator, name),\n (\"product_id.default_code\", operator, name),\n (\"mrp_area_id.name\", operator, name),\n ],\n ]\n )\n return super(ProductMRPArea, self)._name_search(\n name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid\n )\n\n def _compute_mrp_lead_time(self):\n produced = self.filtered(lambda r: r.supply_method == \"manufacture\")\n purchased = self.filtered(lambda r: r.supply_method == \"buy\")\n distributed = self.filtered(\n lambda r: r.supply_method in (\"pull\", \"push\", \"pull_push\")\n )\n for rec in produced:\n rec.mrp_lead_time = rec.product_id.produce_delay\n for rec in purchased:\n rec.mrp_lead_time = rec.main_supplierinfo_id.delay\n for rec in distributed:\n 
rec.mrp_lead_time = rec.distribution_lead_time\n for rec in self - produced - purchased - distributed:\n rec.mrp_lead_time = 0\n\n def _compute_qty_available(self):\n for rec in self:\n rec.qty_available = rec.product_id.with_context(\n location=rec.mrp_area_id.location_id.id\n ).qty_available\n\n def _compute_supply_method(self):\n group_obj = self.env[\"procurement.group\"]\n for rec in self:\n proc_loc = rec.location_proc_id or rec.mrp_area_id.location_id\n values = {\n \"warehouse_id\": rec.mrp_area_id.warehouse_id,\n \"company_id\": rec.mrp_area_id.company_id,\n }\n rule = group_obj._get_rule(rec.product_id, proc_loc, values)\n if not rule:\n rec.supply_method = \"none\"\n continue\n # Keep getting the rule for the product and the source location until the\n # action is \"buy\" or \"manufacture\". Or until the action is \"Pull From\" or\n # \"Pull & Push\" and the supply method is \"Take from Stock\".\n while rule.action not in (\"buy\", \"manufacture\") and rule.procure_method in (\n \"make_to_order\",\n \"mts_else_mto\",\n ):\n new_rule = group_obj._get_rule(\n rec.product_id, rule.location_src_id, values\n )\n if not new_rule:\n break\n rule = new_rule\n # Determine the supply method based on the final rule.\n boms = rec.product_id.product_tmpl_id.bom_ids.filtered(\n lambda x: x.type in [\"normal\", \"phantom\"]\n )\n rec.supply_method = (\n \"phantom\"\n if rule.action == \"manufacture\" and boms and boms[0].type == \"phantom\"\n else rule.action\n )\n\n @api.depends(\n \"mrp_area_id\", \"supply_method\", \"product_id.route_ids\", \"product_id.seller_ids\"\n )\n def _compute_main_supplier(self):\n \"\"\"Simplified and similar to procurement.rule logic.\"\"\"\n for rec in self.filtered(lambda r: r.supply_method == \"buy\"):\n suppliers = rec.product_id.seller_ids.filtered(\n lambda r: (not r.product_id or r.product_id == rec.product_id)\n and (not r.company_id or r.company_id == rec.company_id)\n )\n if not suppliers:\n rec.main_supplierinfo_id = False\n rec.main_supplier_id = False\n continue\n rec.main_supplierinfo_id = suppliers[0]\n rec.main_supplier_id = suppliers[0].name\n for rec in self.filtered(lambda r: r.supply_method != \"buy\"):\n rec.main_supplierinfo_id = False\n rec.main_supplier_id = False\n\n def _adjust_qty_to_order(self, qty_to_order):\n self.ensure_one()\n if (\n not self.mrp_maximum_order_qty\n and not self.mrp_minimum_order_qty\n and self.mrp_qty_multiple == 1.0\n ):\n return qty_to_order\n if qty_to_order < self.mrp_minimum_order_qty:\n return self.mrp_minimum_order_qty\n if self.mrp_qty_multiple:\n multiplier = ceil(qty_to_order / self.mrp_qty_multiple)\n qty_to_order = multiplier * self.mrp_qty_multiple\n if self.mrp_maximum_order_qty and qty_to_order > self.mrp_maximum_order_qty:\n return self.mrp_maximum_order_qty\n return qty_to_order\n\n def update_min_qty_from_main_supplier(self):\n for rec in self.filtered(\n lambda r: r.main_supplierinfo_id and r.supply_method == \"buy\"\n ):\n rec.mrp_minimum_order_qty = rec.main_supplierinfo_id.min_qty\n\n def _in_stock_moves_domain(self):\n self.ensure_one()\n locations = self.mrp_area_id._get_locations()\n return [\n (\"product_id\", \"=\", self.product_id.id),\n (\"state\", \"not in\", [\"done\", \"cancel\"]),\n (\"product_qty\", \">\", 0.00),\n (\"location_id\", \"not in\", locations.ids),\n (\"location_dest_id\", \"in\", locations.ids),\n ]\n\n def _out_stock_moves_domain(self):\n self.ensure_one()\n locations = self.mrp_area_id._get_locations()\n return [\n (\"product_id\", \"=\", self.product_id.id),\n 
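# Standalone restatement of _adjust_qty_to_order above, with made-up numbers:
# clamp to the minimum, round up to the next multiple, then cap at the maximum.
from math import ceil

def adjust_qty(qty, minimum=0.0, maximum=0.0, multiple=1.0):
    if qty < minimum:
        return minimum
    if multiple:
        qty = ceil(qty / multiple) * multiple
    if maximum and qty > maximum:
        return maximum
    return qty

assert adjust_qty(7, minimum=5, maximum=50, multiple=10) == 10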
(\"state\", \"not in\", [\"done\", \"cancel\"]),\n (\"product_qty\", \">\", 0.00),\n (\"location_id\", \"in\", locations.ids),\n (\"location_dest_id\", \"not in\", locations.ids),\n ]\n\n def action_view_stock_moves(self, domain):\n self.ensure_one()\n action = self.env.ref(\"stock.stock_move_action\").read()[0]\n action[\"domain\"] = domain\n action[\"context\"] = {}\n return action\n\n def action_view_incoming_stock_moves(self):\n return self.action_view_stock_moves(self._in_stock_moves_domain())\n\n def action_view_outgoing_stock_moves(self):\n return self.action_view_stock_moves(self._out_stock_moves_domain())\n\n def _to_be_exploded(self):\n self.ensure_one()\n return self.supply_method in [\"manufacture\", \"phantom\"]\n","repo_name":"fallewi/covagro","sub_path":"mrp_multi_level/models/product_mrp_area.py","file_name":"product_mrp_area.py","file_ext":"py","file_size_in_byte":11243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72238227465","text":"import numpy as np\n\nfrom modules._pyqt import QtCore, pg, qasync\nfrom modules.pyqt.pyqt_screen_widget import ScreenWidget\nfrom .pyqt_map_button import MapButton\n\n\nclass BaseMapWidget(ScreenWidget):\n max_height = 1\n max_width = 3\n\n buttons = None\n lock_status = True\n button_press_count = None\n\n # show range from zoom\n zoom = 2000 # [m] #for CourseProfileGraphWidget\n zoomlevel = 13 # for MapWidget\n\n # load course\n course_loaded = False\n\n # signal for physical button\n signal_move_x_plus = QtCore.pyqtSignal()\n signal_move_x_minus = QtCore.pyqtSignal()\n signal_move_y_plus = QtCore.pyqtSignal()\n signal_move_y_minus = QtCore.pyqtSignal()\n signal_zoom_plus = QtCore.pyqtSignal()\n signal_zoom_minus = QtCore.pyqtSignal()\n signal_change_move = QtCore.pyqtSignal()\n\n # for change_move\n move_adjust_mode = False\n move_factor = 1.0\n\n point_color = {\n # 'fix':pg.mkBrush(color=(0,0,160,128)),\n \"fix\": pg.mkBrush(color=(0, 0, 255)),\n # 'lost':pg.mkBrush(color=(96,96,96,128))\n \"lost\": pg.mkBrush(color=(170, 170, 170)),\n }\n\n def __init__(self, parent, config):\n self.buttons = {}\n self.button_press_count = {}\n super().__init__(parent, config)\n\n self.signal_move_x_plus.connect(self.move_x_plus)\n self.signal_move_x_minus.connect(self.move_x_minus)\n self.signal_move_y_plus.connect(self.move_y_plus)\n self.signal_move_y_minus.connect(self.move_y_minus)\n self.signal_zoom_plus.connect(self.zoom_plus)\n self.signal_zoom_minus.connect(self.zoom_minus)\n self.signal_change_move.connect(self.change_move)\n\n def setup_ui_extra(self):\n # main graph from pyqtgraph\n self.plot = pg.PlotWidget()\n self.plot.setBackground(None)\n self.plot.hideAxis(\"left\")\n self.plot.hideAxis(\"bottom\")\n\n # current point\n self.current_point = pg.ScatterPlotItem(pxMode=True)\n self.point = {\n \"pos\": [np.nan, np.nan],\n \"size\": 20,\n \"pen\": {\"color\": \"w\", \"width\": 2},\n \"brush\": self.point_color[\"lost\"],\n }\n\n # self.plot.setMouseEnabled(x=False, y=False)\n\n # make buttons\n self.buttons[\"lock\"] = MapButton(\"L\")\n self.buttons[\"zoomup\"] = MapButton(\"+\")\n self.buttons[\"zoomdown\"] = MapButton(\"-\")\n self.buttons[\"left\"] = MapButton(\"←\")\n self.buttons[\"right\"] = MapButton(\"→\")\n self.buttons[\"up\"] = MapButton(\"↑\")\n self.buttons[\"down\"] = MapButton(\"↓\")\n self.buttons[\"go\"] = MapButton(\"Go\")\n\n self.buttons[\"lock\"].clicked.connect(self.switch_lock)\n self.buttons[\"right\"].clicked.connect(self.move_x_plus)\n 
self.buttons[\"left\"].clicked.connect(self.move_x_minus)\n self.buttons[\"up\"].clicked.connect(self.move_y_plus)\n self.buttons[\"down\"].clicked.connect(self.move_y_minus)\n self.buttons[\"zoomdown\"].clicked.connect(self.zoom_minus)\n self.buttons[\"zoomup\"].clicked.connect(self.zoom_plus)\n\n # long press\n self.buttons[\"lock\"].setAutoRepeat(True)\n self.buttons[\"lock\"].setAutoRepeatDelay(1000)\n self.buttons[\"lock\"].setAutoRepeatInterval(1000)\n self.buttons[\"lock\"]._state = 0\n self.button_press_count[\"lock\"] = 0\n\n self.get_max_zoom()\n\n # override disable\n def set_minimum_size(self):\n pass\n\n def resizeEvent(self, event):\n super().resizeEvent(event)\n # for expanding row\n n = self.layout.rowCount()\n h = int(self.size().height() / n)\n for i in range(n):\n self.layout.setRowMinimumHeight(i, h)\n\n def lock_off(self):\n self.lock_status = False\n\n def lock_on(self):\n self.lock_status = True\n\n def switch_lock(self):\n if self.lock_status:\n self.lock_off()\n else:\n self.lock_on()\n\n def change_move(self):\n if not self.move_adjust_mode:\n self.move_factor = 32\n self.move_adjust_mode = True\n else:\n self.move_factor = 1.0\n self.move_adjust_mode = False\n\n @qasync.asyncSlot()\n async def move_x_plus(self):\n await self.move_x(+self.zoom / 2)\n\n @qasync.asyncSlot()\n async def move_x_minus(self):\n await self.move_x(-self.zoom / 2)\n\n @qasync.asyncSlot()\n async def move_y_plus(self):\n await self.move_y(+self.zoom / 2)\n\n @qasync.asyncSlot()\n async def move_y_minus(self):\n await self.move_y(-self.zoom / 2)\n\n async def move_x(self, delta):\n self.move_pos[\"x\"] += delta\n await self.update_display()\n\n async def move_y(self, delta):\n self.move_pos[\"y\"] += delta\n await self.update_display()\n\n @qasync.asyncSlot()\n async def zoom_plus(self):\n self.zoom /= 2\n self.zoomlevel += 1\n await self.update_display()\n\n @qasync.asyncSlot()\n async def zoom_minus(self):\n self.zoom *= 2\n self.zoomlevel -= 1\n await self.update_display()\n\n def get_max_zoom(self):\n if not self.course.is_set:\n return\n\n if self.config.G_MAX_ZOOM != 0:\n return\n\n z = self.zoom\n dist = self.course.distance[-1]\n\n if z / 1000 < dist:\n while z / 1000 < dist:\n z *= 2\n z *= 2\n else:\n while z / 1000 > dist:\n z /= 2\n self.config.G_MAX_ZOOM = z\n","repo_name":"hishizuka/pizero_bikecomputer","sub_path":"modules/pyqt/graph/pyqt_base_map.py","file_name":"pyqt_base_map.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","stars":633,"dataset":"github-code","pt":"81"} +{"seq_id":"19050168006","text":"from flask import render_template, request, Blueprint, redirect, url_for\nfrom flask_login import current_user\nfrom kindlycare import db\n\nfrom kindlycare.doctors.forms import FilterForm\nfrom kindlycare.models import Doctors, Hospitals, Feedback, Slots\n\ncore = Blueprint('core', __name__)\n\n\n@core.route('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n opt = request.form.get('options')\n print(opt) # Value of the Radio Button\n return redirect(url_for('core.filter', opt=opt))\n # print(current_user.slots.morning_slots)\n '''slot = Slots.query.filter_by(doc_id=current_user.id).all()\n for i in slot:\n print(i.morning_slots)'''\n docs = []\n hosps = []\n hosps = Hospitals.query.limit(6).all()\n print(hosps)\n docs.append(Doctors.query.filter_by(specialization='Dentist').first())\n docs.append(Doctors.query.filter_by(specialization='Pediatrition').first())\n docs.append(Doctors.query.filter_by(\n 
specialization='General Physician').first())\n docs.append(Doctors.query.filter_by(\n specialization='Heart Surgeon').first())\n docs.append(Doctors.query.filter_by(specialization='Homeopath').first())\n docs.append(Doctors.query.filter_by(specialization='Neurologist').first())\n\n docs = [i for i in docs if i]\n print(docs)\n return render_template('index.html', docs=docs, hosps=hosps)\n\n\n@core.route('/about')\ndef about():\n return render_template('about.html')\n\n\n@core.route('/filter')\ndef filter():\n opt = request.args.get('opt')\n docs = Doctors.query.filter_by(specialization=opt).all()\n return render_template('home.html', docs=docs)\n\n\n@core.route('/docs')\ndef docs():\n if request.method == 'POST':\n opt = request.form.get('options')\n print(opt) # Value of the Radio Button\n return redirect(url_for('core.filter', opt=opt))\n\n docs = Doctors.query.all()\n return render_template('home.html', docs=docs)\n\n\n@core.route('/hosps')\ndef hosps():\n hosp = Hospitals.query.all()\n return render_template('home.html', hosp=hosp)\n\n\n@core.route('/doctors')\ndef doctors():\n return render_template('doctors.html')\n\n\n@core.route('/search')\ndef search():\n re = request.args.get(\"query\")\n req = str(re).lower().split(\" \")\n doc_list = []\n hosp_list = []\n docs = Doctors.query.all()\n hosp = Hospitals.query.all()\n for re in req:\n for d in docs:\n print(d.name)\n if re in d.name.lower().split(\" \") or re in d.qualification.lower().split(\" \") or re in d.specialization.lower().split(\" \"):\n doc_list.append(d)\n for h in hosp:\n if re in h.name.lower().split(\" \") or re in h.speciality.lower().split(\" \"):\n hosp_list.append(h)\n return render_template('home.html', docs=doc_list, hosp=hosp_list)\n","repo_name":"rishikaul22/Kindly-Care","sub_path":"kindlycare/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27278555122","text":"#!/usr/bin/env python3\n\nfrom functools import wraps\nfrom time import time\nimport numpy as np\n\n# from https://stackoverflow.com/questions/1622943/timeit-versus-timing-decorator\n\n\ndef timing(f):\n @wraps(f)\n def wrap(*args, **kw):\n ts = time()\n result = f(*args, **kw)\n te = time()\n print('func:%r took: %2.4f sec' % (f.__name__, te-ts))\n return result\n return wrap\n\n\n@timing\ndef bruteforce(inputlist):\n inversions = 0\n length = len(inputlist)\n for i in range(length):\n for j in range(i+1, length):\n if inputlist[i] > inputlist[j]:\n inversions += 1\n\n return inversions\n\n\n@timing\ndef mergesort(inputlist):\n return merge(inputlist)\n\n\ndef merge(inputlist):\n inversions = 0\n # based on https://www.geeksforgeeks.org/merge-sort/\n\n if len(inputlist) > 1:\n # split\n middle = len(inputlist) // 2\n left = inputlist[:middle]\n right = inputlist[middle:]\n\n inversions += merge(left)\n inversions += merge(right)\n\n i = j = k = 0\n\n # merge lists by stepping through both and finding the smallest element\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n inputlist[k] = left[i]\n i += 1\n else:\n inversions += len(left) - i\n inputlist[k] = right[j]\n j += 1\n k += 1\n\n # clean up any leftovers\n while i < len(left):\n inputlist[k] = left[i]\n i += 1\n k += 1\n\n while j < len(right):\n inputlist[k] = right[j]\n j += 1\n k += 1\n\n return inversions\n\n\ndef main():\n n = 10**3\n inputlist = np.random.randint(n, size=n).tolist()\n # use slicing to pass by value\n expected = 
bruteforce(inputlist[:])\n actual = mergesort(inputlist[:])\n print(actual, expected)\n assert actual == expected\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mbreault/python","sub_path":"algorithms/inversions/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11111563573","text":"import datetime\nfrom aciconfigdb import ConfigDB\nfrom flask import Flask, render_template, session, redirect, url_for\nfrom flask import flash, send_from_directory, request\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom requests import Timeout\nfrom flask.ext import admin\nfrom flask.ext.admin import BaseView, AdminIndexView, expose\nfrom flask.ext.admin.actions import action\nfrom flask.ext.admin.contrib.sqla import ModelView\nfrom flask.ext.admin.model.template import macro\nfrom flask.ext.bootstrap import Bootstrap\nfrom flask.ext.wtf import Form, CsrfProtect\nfrom wtforms import StringField, SubmitField, PasswordField, BooleanField\nfrom wtforms import RadioField, IntegerField, TextAreaField, SelectField\nfrom wtforms.fields.html5 import DateField, DateTimeField\nfrom wtforms.validators import Required, IPAddress\nfrom wtforms.validators import ValidationError, Optional\nimport json\nimport re\nfrom flask import jsonify\nfrom acitoolkit.acitoolkit import Credentials\nimport difflib\n\n# Create application\napp = Flask(__name__, static_folder='static')\n\n# Cross site replay security\napp.config['SECRET_KEY'] = 'Dnit7qz7mfcP0YuelDrF8vLFvk0snhwP'\napp.config['CSRF_ENABLED'] = True\ncsrf = CsrfProtect(app)\n\nbootstrap = Bootstrap(app)\n\n# Create in-memory database\napp.config['DATABASE_FILE'] = 'snapshots.sqlite'\napp.config['SQLALCHEMY_DATABASE_URI'] = ('sqlite:///' +\n app.config['DATABASE_FILE'])\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\n\n# Create the ACI Config Database\ncdb = ConfigDB()\nversions = cdb.get_versions()\n\n\n# Credentials\nclass CredentialsForm(Form):\n \"\"\"\n Form to collect APIC credentials\n \"\"\"\n ipaddr = StringField('APIC IP Address:',\n validators=[Required(), IPAddress()])\n secure = BooleanField('Use secure connection', validators=[])\n username = StringField('APIC Username:', validators=[Required()])\n password = PasswordField('APIC Password:', validators=[Required()])\n submit = SubmitField('Save')\n\n\nclass ResetForm(Form):\n \"\"\"\n Form to hold reset button\n \"\"\"\n reset = SubmitField('Reset')\n\n\nclass CancelSchedule(Form):\n \"\"\"\n Form to hold cancel schedule\n \"\"\"\n cancel = SubmitField('Cancel Schedule')\n\n\nclass DiffView(BaseView):\n \"\"\"\n View for the JSON diffs\n \"\"\"\n def is_visible(self):\n \"\"\"\n Indicates whether the view is visible\n :return: False\n \"\"\"\n return False\n\n @expose('/')\n def index(self):\n \"\"\"\n Main diff routine\n :return: Rendered view\n \"\"\"\n files = []\n (file1_id, file2_id) = session.get('diff_files')\n file1_obj = Snapshots.query.get(file1_id)\n file2_obj = Snapshots.query.get(file2_id)\n file1 = str(cdb.get_file(file1_obj.filename,\n file1_obj.version)).split('\\n')\n file2 = str(cdb.get_file(file2_obj.filename,\n file2_obj.version)).split('\\n')\n diff = difflib.HtmlDiff(wrapcolumn=120)\n table = diff.make_table(file1, file2)\n return self.render('diffview.html', table=table)\n\n\nclass FileView(BaseView):\n \"\"\"\n View for the JSON Files\n \"\"\"\n def is_visible(self):\n return False\n\n @expose('/')\n def index(self):\n files = []\n 
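# Self-contained example (toy inputs) of the difflib.HtmlDiff call DiffView
# uses above: make_table renders a side-by-side HTML diff of two line lists.
import difflib

left = ["a = 1", "b = 2"]
right = ["a = 1", "b = 3"]
table = difflib.HtmlDiff(wrapcolumn=120).make_table(left, right)
print(table[:40])  # an HTML <table class="diff" ...> fragment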
for fileid in session.get('viewfiles'):\n file_obj = Snapshots.query.get(fileid)\n files.append([cdb.get_file(file_obj.filename, file_obj.version),\n file_obj.filename,\n file_obj.version])\n return self.render('fileview.html', files=files)\n\n\nclass FeedbackForm(Form):\n \"\"\"\n Form for feedback\n \"\"\"\n category = SelectField('', choices=[('bug', 'Bug'),\n ('enhancement', 'Enhancement Request'),\n ('question', 'General Question'),\n ('comment', 'General Comment')])\n comment = TextAreaField('Comment', validators=[Required()])\n submit = SubmitField('Submit')\n\n\nclass Feedback(BaseView):\n \"\"\"\n View for feedback\n \"\"\"\n @expose('/')\n def index(self):\n \"\"\"\n View for feedback\n \"\"\"\n form = FeedbackForm()\n return self.render('feedback.html', form=form)\n\n\nclass About(BaseView):\n \"\"\"\n View for About\n \"\"\"\n @expose('/')\n def index(self):\n \"\"\"\n View for About\n \"\"\"\n return self.render('about.html')\n\n\nclass StackedDiffsForm(Form):\n \"\"\"\n Form for stacked diffs of JSON file lines\n \"\"\"\n show = SubmitField('Show versions with no diffs')\n hide = SubmitField('Hide versions with no diffs')\n start_version = SelectField('Start Version')\n end_version = SelectField('End Version')\n daterange = SubmitField('Set Date Range')\n\n\nclass StackedDiffs(BaseView):\n \"\"\"\n View for stacked diffs of JSON file lines\n \"\"\"\n @expose('/')\n def index(self):\n \"\"\"\n View for stacked diffs of JSON file lines\n \"\"\"\n if session.get('hideall') is None:\n session['hideall'] = False\n changes = cdb.get_versions(with_changes=True)\n if changes is None:\n changes = []\n start_choices = []\n end_choices = []\n for choice in changes:\n start_choices.append((choice[0], choice[0]))\n end_choices.append((choice[0], choice[0]))\n if session.get('diffstartversion') is None:\n if len(start_choices):\n session['diffstartversion'] = start_choices[0]\n if session.get('diffendversion') is None:\n if len(end_choices):\n session['diffendversion'] = end_choices[-1]\n MyStackedDiffsForm = type('MyStackedDiffsForm',\n (StackedDiffsForm,),\n {'start_version': SelectField('Start Version',\n default=session.get('diffstartversion'),\n choices=start_choices),\n 'end_version': SelectField('End Version',\n default=session.get('diffendversion'),\n choices=end_choices)})\n form = MyStackedDiffsForm()\n f = open('static/data.csv', 'w')\n f.write('Version,Deletions,Additions\\n')\n in_range = False\n if session.get('diffstartversion') is None:\n in_range = True\n for (version, additions, deletions) in changes:\n if not in_range and version == session.get('diffstartversion'):\n in_range = True\n if in_range:\n f.write(version + ',' + deletions + ',' + additions + '\\n')\n if in_range and version == session.get('diffendversion'):\n in_range = False\n f.close()\n return self.render('stackedbardiffs.html',\n form=form,\n hideall=session.get('hideall'))\n\n @expose('/showhide', methods=['GET', 'POST'])\n def showhide(self):\n \"\"\"\n Hide or show the stacked diffs of JSON file lines\n \"\"\"\n # form = StackedDiffsForm()\n if session.get('hideall') is False:\n session['hideall'] = True\n else:\n session['hideall'] = False\n print('Passing ', session.get('hideall'))\n return redirect(url_for('stackeddiffs.index'))\n\n @expose('/setstartenddiffs', methods=['GET', 'POST'])\n def setstartenddiffs(self):\n \"\"\"\n Set the start and end of diffs\n \"\"\"\n session['diffstartversion'] = request.form['start_version']\n session['diffendversion'] = request.form['end_version']\n return 
redirect(url_for('stackeddiffs.index'))\n\n @expose('/data.csv')\n def data(self):\n \"\"\"\n Get the data\n \"\"\"\n with open('static/data.csv', 'r') as data_file:\n data = data_file.read()\n return data\n\n\nclass ScheduleSnapshotForm(Form):\n \"\"\"\n Form for scheduling the snapshot\n \"\"\"\n frequency = RadioField('Frequency',\n choices=[('onetime', 'One time'),\n ('interval', 'Every')],\n validators=[Required()],\n default='onetime')\n number = IntegerField('', validators=[Optional()])\n interval = SelectField('', choices=[('minutes', 'minutes'),\n ('hours', 'hours'),\n ('days', 'days')])\n date = DateField('Start date', format='%Y-%m-%d',\n default=datetime.datetime.now)\n time = DateTimeField('Start time', format='%H:%M',\n default=datetime.datetime.now)\n submit = SubmitField('Schedule Snapshot')\n\n def validate_number(form, field):\n if form.frequency.data != 'interval':\n raise ValidationError('Should not be set for One time')\n if not isinstance(form.number.data, int) or (form.number.data < 1):\n raise ValidationError('Should be a number greater than 1')\n\n\nclass APICArgs(object):\n \"\"\"\n Class to hold the APIC credentials\n \"\"\"\n def __init__(self, ipaddr, username, secure, password):\n self.login = username\n self.password = password\n if secure:\n self.url = 'https://' + ipaddr\n else:\n self.url = 'http://' + ipaddr\n\n\nclass ScheduleSnapshot(BaseView):\n \"\"\"\n View to schedule the snapshot\n \"\"\"\n @expose('/', methods=['GET', 'POST'])\n def index(self):\n \"\"\"\n View to schedule the snapshot\n \"\"\"\n form = ScheduleSnapshotForm()\n cancel_form = CancelSchedule()\n if cancel_form.cancel.data:\n cdb.cancel_schedule()\n elif form.validate_on_submit() and form.submit.data:\n # Check if we have APIC Credentials and fail if none.\n if session.get('ipaddr') is None:\n flash('APIC Credentials have not been entered', 'error')\n return self.render('snapshot.html', form=form,\n cancel_form=cancel_form,\n lastsnapshot=cdb.get_latest_snapshot_time(),\n nextsnapshot=cdb.get_next_snapshot_time(),\n schedule=cdb.get_current_schedule())\n args = APICArgs(session.get('ipaddr'),\n session.get('username'),\n session.get('secure'),\n session.get('password'))\n # Login (Always do this since multiple login doesn't hurt and\n # this will automatically cover when credentials change)\n try:\n resp = cdb.login(args)\n if resp.ok is not True:\n flash('Unable to login to the APIC', 'error')\n return self.render('snapshot.html', form=form,\n cancel_form=cancel_form,\n lastsnapshot=cdb.get_latest_snapshot_time(),\n nextsnapshot=cdb.get_next_snapshot_time(),\n schedule=cdb.get_current_schedule())\n except Timeout:\n flash('Connection timeout when trying to reach the APIC',\n 'error')\n return self.render('snapshot.html', form=form,\n cancel_form=cancel_form,\n lastsnapshot=cdb.get_latest_snapshot_time(),\n nextsnapshot=cdb.get_next_snapshot_time(),\n schedule=cdb.get_current_schedule())\n\n date = form.date.data\n time = form.time.data\n start = datetime.datetime(date.year, date.month, date.day,\n time.hour, time.minute)\n # Take a snapshot\n cdb.schedule_snapshot(form.frequency.data,\n form.number.data,\n form.interval.data,\n form.date.data,\n form.time.data,\n build_db)\n\n flash('Snapshot successfully scheduled', 'success')\n return redirect(url_for('schedulesnapshot.index'))\n return self.render('snapshot.html', form=form,\n cancel_form=cancel_form,\n lastsnapshot=cdb.get_latest_snapshot_time(),\n nextsnapshot=cdb.get_next_snapshot_time(),\n 
schedule=cdb.get_current_schedule())\n\n\ndef DiffFiles(diffList, data):\n \"\"\"\n Get the diff between snapshots\n \"\"\"\n diff = \"\"\n versions_exist = cdb.get_versions()\n versions_needed = [diffList[0].get('version'), diffList[1].get('version')]\n if all(x in versions_exist for x in versions_needed):\n file1 = str(cdb.get_file(diffList[0].get('filename'),\n diffList[0].get('version'))).split('\\n')\n file2 = str(cdb.get_file(diffList[1].get('filename'),\n diffList[1].get('version'))).split('\\n')\n d = difflib.Differ()\n diff = d.compare(file1, file2)\n return diff\n\n\ndef ViewFile(viewList, data):\n \"\"\"\n Get the list of files\n \"\"\"\n files = \"\"\n for snapshot_needed in viewList:\n for snapshot in data['snapshots']:\n if (snapshot['filename'] == snapshot_needed['filename']) and (snapshot['version'] == snapshot_needed['version']):\n snapshot_file = cdb.get_file(snapshot_needed['filename'], snapshot_needed['version'])\n files += snapshot_file.encode('utf-8').decode('unicode_escape')\n return files\n\n\ndef DeleteFiles(deleteList, data):\n \"\"\"\n Delete the list of snapshots\n \"\"\"\n for delete_snapshot in deleteList:\n for snapshot in data['snapshots']:\n if (snapshot['filename'] == delete_snapshot['filename']) and (snapshot['version'] == delete_snapshot['version']) and (snapshot['latest'] == delete_snapshot['latest']):\n data['snapshots'].remove(snapshot)\n return data\n\n\ndef Filtering(filter_key_item, filter_args, data):\n \"\"\"\n Filter the snapshot list\n \"\"\"\n if filter_key_item == 'Version':\n filter_key = 'version'\n elif filter_key_item == 'Filename':\n filter_key = 'filename'\n elif filter_key_item == 'Latest':\n filter_key = 'latest'\n filtered = []\n if filter_key_item in filter_args:\n filter_key_args_list = filter_args[filter_key_item]\n for filter_args in filter_key_args_list:\n filtered = []\n for snapshot in data['snapshots']:\n match = filter_args['match']\n if filter_key in filter_args:\n needed = filter_args[filter_key]\n if match == \"equals\" and needed == snapshot[filter_key]:\n filtered.append(snapshot)\n if match == \"not equal\" and needed != snapshot[filter_key]:\n filtered.append(snapshot)\n if match == \"contains\":\n prog = re.search(needed, snapshot[filter_key])\n if prog is not None:\n filtered.append(snapshot)\n if match == \"not contains\":\n prog = re.search(needed, snapshot[filter_key])\n if prog is None:\n filtered.append(snapshot)\n if match == \"in list\":\n if type(filter_args[filter_key]) is list:\n if snapshot[filter_key] in filter_args[filter_key]:\n filtered.append(snapshot)\n else:\n needed = filter_args[filter_key]\n if needed == snapshot[filter_key]:\n filtered.append(snapshot)\n if match == \"not in list\":\n if type(filter_args[filter_key]) is list:\n if snapshot[filter_key] not in filter_args[filter_key]:\n filtered.append(snapshot)\n else:\n needed = filter_args[filter_key]\n if needed != snapshot[filter_key]:\n filtered.append(snapshot)\n if match == \"empty\":\n if \"match_for\" in filter_args:\n match_for = filter_args['match_for']\n else:\n match_for = True\n if match_for:\n filtered = []\n break\n else:\n filtered.append(snapshot)\n if (match == \"equals\" or match == \"not equal\") and filter_key == \"latest\":\n if \"match_for\" in filter_args:\n match_for = filter_args['match_for']\n else:\n match_for = True\n if (match == \"equals\" and snapshot['latest'] is match_for):\n filtered.append(snapshot)\n if (match == \"not equal\" and snapshot['latest'] is not match_for):\n filtered.append(snapshot)\n 
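# Illustrative filter_args payload handled above (shape taken from the curl\n # examples in the JSON API docstrings below; values are examples only):\n # {\"Version\": [{\"version\": \"2016-04-22_14.33.39\", \"match\": \"equals\"}],\n # \"Latest\": [{\"match\": \"not equal\", \"match_for\": false}]}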
data['snapshots'] = filtered\n else:\n filtered = data['snapshots']\n return data\n\n\ndef FilterFunction(filter_args, data):\n \"\"\"\n Filter the snapshots\n \"\"\"\n filtered = []\n if 'Version' in filter_args:\n filtered = Filtering('Version', filter_args, data)\n data = filtered\n if 'Filename' in filter_args:\n filtered = Filtering('Filename', filter_args, data)\n data = filtered\n if 'Latest' in filter_args:\n filtered = Filtering('Latest', filter_args, data)\n data = filtered\n return data\n\n\nclass JsonInterface(BaseView):\n \"\"\"\n JSON interface to program the tool\n \"\"\"\n @csrf.exempt\n @app.route('/login', methods=['POST'])\n def login(self):\n if request.method == 'POST':\n data = request.json\n session['ipaddr'] = data['ipaddr']\n session['secure'] = data['secure']\n session['username'] = data['username']\n session['password'] = data['password']\n if (session.get('ipaddr') is None or session.get('username') is None or session.get('password') is None):\n return\"please provide ipaddress, username,password\"\n\n args = APICArgs(session.get('ipaddr'),\n session.get('username'),\n session.get('secure'),\n session.get('password'))\n try:\n resp = cdb.login(args)\n if resp.ok is not True:\n return'Unable to login to the APIC'\n except Timeout:\n return'Connection timeout when trying to reach the APIC'\n return'loged in'\n '''\n method to login to aci config db\n usage : curl -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/login --data @login.json\n and the login.json structure is\n {\"ipaddr\":\"\",\"secure\":\"\",\"username\":\"\",\"password\":\"\"}\n '''\n\n @csrf.exempt\n @app.route('/viewsnapshots', methods=['POST'])\n def viewsnapshots(self):\n \"\"\"\n View the snapshots\n \"\"\"\n if request.method == 'POST':\n versions = cdb.get_versions(with_changes=True)\n data = {}\n Snapshots = []\n for (version, additions, deletions) in versions:\n for (filename, adds, dels) in cdb.get_filenames(version,\n prev_version=None,\n with_changes=True):\n item = {}\n item['filename'] = filename\n item['version'] = version\n is_latest = (version == cdb.get_latest_file_version(filename))\n item['latest'] = is_latest\n Snapshots.append(item)\n data['snapshots'] = Snapshots\n if len(data['snapshots']) is 0:\n return \"no snapshots\"\n if request.headers['Content-Type'] == 'application/json':\n if 'filter' in request.json and request.json['filter'] is not None:\n filter_args = {}\n filter_args = request.json['filter']\n data = FilterFunction(filter_args, data)\n '''\n this method is used to filter snapshots based on version, filename,latest\n usage : curl H \"Content-Type: application/json\" --X POST http://127.0.0.1:5000/viewsnapshot -d '{\"filter\":{\"Version\" : [{\"version\":\"2016-04-22_14.33.39\",\"match\":\"equals\"}],\"Filename\" : [{\"filename\":\"snapshot_172.31.216.100_10.json\",\"match\":\"equals\"}],\"Latest\":[{\"match\":\"not equal\",\"match_for\":false}]}}'\n return all the snapshots matching this criteria as a json\n '''\n if 'outputfile' in request.json and request.json['outputfile'] is not None:\n with open(request.json['outputfile'], 'w') as txtfile:\n json.dump(data, txtfile)\n '''\n this method is used to write all the snapshots in json format in the given outputfile\n usage : curl -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/viewsnapshots -d '{\"outputfile\":\"final.txt\"}'\n '''\n if 'action' in request.json:\n action = request.json['action']\n if 'View' in action:\n if len(action['View']) is 0:\n return \"Please select at 
least one record\"\n else:\n data_for_view = {}\n data_for_view['snapshots'] = ViewFile(action['View'], data)\n return data_for_view['snapshots']\n '''\n this method is used to view a single snapshot given name and version in json format\n usgae: curl H \"Content-Type: application/json\" --X POST http://127.0.0.1:5000/viewsnapshot -d '{\"action\":{\"View\" :[{\"filename\":\"snapshot_172.31.216.100_10.json\",\"version\":\"2016-04-22_14.33.39\",\"latest\":true},{\"filename\":\"snapshot_172.31.216.100_10.json\",\"version\":\"2016-04-22_14.33.39\",\"latest\":true}]}}'\n displays the json content on terminal\n '''\n if 'Delete' in action:\n if len(action['Delete']) is 0:\n return \"Please select at least one record.\"\n else:\n data = DeleteFiles(action['Delete'], data)\n '''\n this method is used to delete a snapshots given name and version in json format\n usgae: curl -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/viewsnapshots -d '{\"action\":{\"Delete\" : [{\"filename\":\"snapshot_172.31.216.100_10.json\",\"version\":\"2016-04-22_14.33.39\",\"latest\":true},{\"filename\":\"snapshot_172.31.216.100_10.json\",\"version\":\"2016-04-22_14.33.39\",\"latest\":true}]}}'\n displays the remaining snapshots in json format on terminal\n '''\n if 'View Diffs' in action:\n if len(action['View Diffs']) is 0:\n return \"Please select at least one record\"\n elif len(action['View Diffs']) is 1:\n return \"Please select 2 snapshots to view diffs\"\n elif len(action['View Diffs']) > 2:\n return \"Please select only 2 snapshots to view diffs\"\n else:\n diff = DiffFiles(action['View Diffs'], data)\n return '\\n'.join(diff)\n '''\n this method is used to diff 2 snapshots given name and version in json format\n usgae: curl -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/viewsnapshots -d '{\"action\":{\"View Diffs\" :[{\"filename\":\"snapsho72.31.216.100_10.json\",\"version\":\"2016-04-22_14.33.39\",\"latest\":true},{\"filename\":\"snapshot_172.31.216.100_10.json\",\"version\":\"2016-04-22_14.33.39\",\"latest\":true}]}}'\n displays the diff of 2 snapshots on terminal\n '''\n return jsonify(snapshots=data['snapshots'])\n '''\n this method is used to view all the snapshots and write it to a file in json format\n usage : curl -X POST http://127.0.0.1:5000/viewsnapshots\n '''\n\n @csrf.exempt\n @app.route('/logout')\n def logout(self):\n \"\"\"\n this method is used to logout. 
clears all session variables.\n usage: curl -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/logout\n \"\"\"\n if cdb.is_logged_in():\n session['ipaddr'] = None\n session['secure'] = None\n session['username'] = None\n session['password'] = None\n return 'logged out'\n else:\n return 'logged out'\n\n @csrf.exempt\n @app.route('/cancelschedule', methods=['POST'])\n def cancel_schedule(self):\n \"\"\"\n this method is used to cancel the latest scheduled snapshots\n usage: curl -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/cancelschedule\n \"\"\"\n if request.method == 'POST':\n resp = cdb.cancel_schedule()\n return 'cancel schedule successful'\n\n @csrf.exempt\n @app.route('/schedulesnapshot', methods=['POST'])\n def schedulesnapshot(self):\n \"\"\"\n Schedule the snapshot\n \"\"\"\n if request.method == 'POST':\n data = request.json\n if cdb.is_logged_in():\n if 'date' in data:\n date = datetime.datetime.strptime(data['date'], '%b %d %Y')\n else:\n date = datetime.date.today()\n if 'starttime' in data:\n starttime = datetime.datetime.strptime(data['starttime'], '%I:%M%p')\n else:\n starttime = datetime.datetime.now()\n if 'frequency' in data and (data['frequency'] == \"onetime\" or data['frequency'] == \"interval\"):\n if 'interval' in data and (data['interval'] == \"minutes\" or data['interval'] == \"hours\" or data['interval'] == \"days\"):\n cdb.schedule_snapshot(data['frequency'],\n data['number'],\n data['interval'],\n date,\n starttime,\n build_db)\n\n versions = cdb.get_versions(with_changes=True)\n data = {}\n Snapshots = []\n for (version, additions, deletions) in versions:\n for (filename, adds, dels) in cdb.get_filenames(version,\n prev_version=None,\n with_changes=True):\n item = {}\n item['filename'] = filename\n item['version'] = version\n is_latest = (version == cdb.get_latest_file_version(filename))\n item['latest'] = is_latest\n Snapshots.append(item)\n data['snapshots'] = Snapshots\n return \"Snapshot successfully scheduled\\n\"\n else:\n return 'please login'\n '''\n this method is used to schedule snapshots.\n usage : curl -H \"Content-Type: application/json\" -X POST http://127.0.0.1:5000/schedulesnapshot --data @sample.json\n and the sample.json structure is\n {\"frequency\":\"onetime\",\"date\":\"Jun 1 2005\",\"starttime\":\"1:33PM\",\"number\":\"\",\"interval\":\"minutes\"}\n interval supports minutes, hours, days\n frequency supports onetime and interval\n '''\n\n\nclass CredentialsView(BaseView):\n \"\"\"\n View for the APIC credentials\n \"\"\"\n @expose('/', methods=['GET', 'POST'])\n def index(self):\n \"\"\"\n View for the APIC credentials\n \"\"\"\n form = CredentialsForm()\n reset_form = ResetForm()\n if form.validate_on_submit() and form.submit.data:\n old_ipaddr = session.get('ipaddr')\n old_username = session.get('username')\n old_secure = session.get('secure')\n old_password = session.get('password')\n if ((old_ipaddr is not None and old_ipaddr != form.ipaddr.data) or\n (old_username is not None and old_username != form.username.data) or\n (old_secure is not None and old_secure != form.secure.data) or\n (old_password is not None and old_password != form.password.data)):\n flash('APIC Credentials have been updated')\n session['ipaddr'] = form.ipaddr.data\n session['secure'] = form.secure.data\n session['username'] = form.username.data\n session['password'] = form.password.data\n return redirect(url_for('credentialsview.index'))\n elif reset_form.reset.data:\n session['ipaddr'] = None\n session['secure'] = None\n
session['username'] = None\n session['password'] = None\n return redirect(url_for('credentialsview.index'))\n return self.render('credentials.html', form=form,\n reset_form=reset_form,\n ipaddr=session.get('ipaddr'),\n username=session.get('username'))\n\n\n# Models\nclass Snapshots(db.Model):\n \"\"\"\n Database model for the snapshots\n \"\"\"\n id = db.Column(db.Integer, primary_key=True)\n version = db.Column(db.Unicode(64))\n filename = db.Column(db.Unicode(64))\n changes = db.Column(db.Unicode(64))\n latest = db.Column(db.Boolean())\n\n def __unicode__(self):\n return self.version\n\n\n# Customized admin interface\nclass CustomView(ModelView):\n \"\"\"\n Custom list view\n \"\"\"\n list_template = 'list.html'\n\n\nclass SnapshotsAdmin(CustomView):\n \"\"\"\n Snapshot view\n \"\"\"\n can_create = False\n can_edit = False\n column_searchable_list = ('version', 'filename')\n column_filters = ('version', 'filename', 'latest')\n column_formatters = dict(changes=macro('render_changes'))\n\n # @action('rollback', 'Rollback',\n # 'Are you sure you want to rollback the configuration ?')\n # def rollback(*args, **kwargs):\n # if session.get('ipaddr') is None:\n # flash('APIC Credentials have not been entered', 'error')\n # return redirect(url_for('snapshotsadmin.index_view'))\n # login_args = APICArgs(session.get('ipaddr'),\n # session.get('username'),\n # session.get('secure'),\n # session.get('password'))\n # try:\n # resp = cdb.login(login_args)\n # if resp.ok is not True:\n # flash('Unable to login to the APIC', 'error')\n # return redirect(url_for('snapshotsadmin.index_view'))\n # except Timeout:\n # flash('Connection timeout when trying to reach the APIC', 'error')\n # return redirect(url_for('snapshotsadmin.index_view'))\n #\n # rollback_files = {}\n # for file_id in args[1]:\n # file_obj = Snapshots.query.get(file_id)\n # version = file_obj.version\n # if version not in rollback_files:\n # rollback_files[version] = []\n # rollback_files[version].append(file_obj.filename)\n # for version in rollback_files:\n # cdb.rollback(version, rollback_files[version])\n # flash(('APIC has been successfully rolled back to the specified'\n # ' version'), 'success')\n # return redirect(url_for('snapshotsadmin.index_view'))\n\n @action('view_diffs', 'View Diffs')\n def view_diffs(*args, **kwargs):\n \"\"\"\n View the snapshot diffs\n \"\"\"\n if len(args[1]) != 2:\n if len(args[1]) > 2:\n flash('Please select only 2 snapshots to view diffs')\n else:\n flash('Please select 2 snapshots to view diffs')\n else:\n session['diff_files'] = args[1]\n return redirect(url_for('diffview.index'))\n\n @action('view', 'View')\n def view(*args, **kwargs):\n \"\"\"\n View the snapshot\n \"\"\"\n for arg in args[1]:\n obj = Snapshots.query.get(arg)\n session['viewfiles'] = args[1]\n return redirect(url_for('fileview.index'))\n\n\nclass RollbackForm(Form):\n \"\"\"\n Form for the rollback\n \"\"\"\n version = SelectField('Version', coerce=int)\n rollback = SubmitField('Rollback')\n\n\nclass RollbackView(BaseView):\n \"\"\"\n View for the rollback\n \"\"\"\n @expose('/', methods=['GET', 'POST'])\n def index(self):\n \"\"\"\n View for the rollback\n \"\"\"\n if session.get('ipaddr') is None:\n flash('APIC Credentials have not been entered', 'error')\n return redirect(url_for('rollbackview.index'))\n args = APICArgs(session.get('ipaddr'),\n session.get('username'),\n session.get('secure'),\n session.get('password'))\n # Login (Always do this since multiple login doesn't hurt and\n # this will automatically cover when 
credentials change)\n try:\n resp = cdb.login(args)\n if resp.ok is not True:\n flash('Unable to login to the APIC', 'error')\n return redirect(url_for('rollbackview.index'))\n except Timeout:\n flash('Connection timeout when trying to reach the APIC',\n 'error')\n return redirect(url_for('rollbackview.index'))\n form = RollbackForm()\n versions = cdb.get_versions(with_changes=True)\n rollback_versions = []\n if versions is not None:\n count = 0\n for version in versions:\n count += 1\n rollback_versions.append((count, version[0]))\n form.version.choices = rollback_versions\n if form.validate_on_submit() and form.rollback.data:\n version = rollback_versions[form.version.data - 1][1]\n cdb.rollback_using_import_policy(version)\n flash('Rollback successfully processed', 'success')\n return redirect(url_for('rollbackview.index'))\n return self.render('rollback.html', form=form, versions=[])\n\n\n# Create admin with custom base template\nhomepage_view = AdminIndexView(name='Home', template='admin/index.html',\n url='/')\nadmin = admin.Admin(app,\n name='Snapback',\n index_view=homepage_view,\n base_template='layout.html')\n\n# Add views\nadmin.add_view(CredentialsView(name='Credentials'))\nadmin.add_view(ScheduleSnapshot(name='Schedule Snapshot',\n endpoint='schedulesnapshot'))\nadmin.add_view(SnapshotsAdmin(Snapshots, db.session,\n endpoint=\"snapshotsadmin\"))\nadmin.add_view(RollbackView(name='Version Rollback'))\nadmin.add_view(StackedDiffs(name='Version Diffs'))\nadmin.add_view(About(name='About'))\nadmin.add_view(FileView(name='View'))\nadmin.add_view(DiffView(name='View Diffs'))\nadmin.add_view(Feedback(name='Feedback'))\n\n\ndef build_db():\n \"\"\"\n Populate the db with the existing snapshot images.\n \"\"\"\n db.drop_all()\n db.create_all()\n prev_version = None\n versions = cdb.get_versions(with_changes=True)\n if versions is None:\n return\n for (version, additions, deletions) in versions:\n for (filename, adds, dels) in cdb.get_filenames(version,\n prev_version=prev_version,\n with_changes=True):\n snapshot = Snapshots()\n snapshot.version = version\n snapshot.filename = filename\n snapshot.changes = adds + '/' + dels\n is_latest = (version == cdb.get_latest_file_version(filename))\n snapshot.latest = is_latest\n db.session.add(snapshot)\n prev_version = version\n db.session.commit()\n return\n\nif __name__ == '__main__':\n description = ('ACI Configuration Snapshot and Rollback tool.')\n creds = Credentials('server', description)\n args = creds.get()\n\n # Build the database\n build_db()\n\n # Start app\n app.run(debug=True, host=args.ip, port=int(args.port))\n","repo_name":"datacenter/acitoolkit","sub_path":"applications/snapback/snapback.py","file_name":"snapback.py","file_ext":"py","file_size_in_byte":37537,"program_lang":"python","lang":"en","doc_type":"code","stars":341,"dataset":"github-code","pt":"81"} +{"seq_id":"41262511967","text":"#\n# @lc app=leetcode id=516 lang=python\n#\n# [516] Longest Palindromic Subsequence\n#\n\n# @lc code=start\nclass Solution(object):\n def longestPalindromeSubseq(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n n = len(s)\n dp = [[0] * n for _ in range(n)]\n result = 0\n for i in range(n-1, -1, -1):\n for j in range(i, n):\n if i == j:\n dp[i][j] = 1\n elif s[i] == s[j]:\n dp[i][j] = 2 \n if i+1 <= j-1:\n dp[i][j] += dp[i+1][j-1]\n else:\n dp[i][j] = max(dp[i][j-1], dp[i+1][j])\n if i+1 <= j-1:\n dp[i][j] = max(dp[i+1][j-1], dp[i][j])\n result = max(result, dp[i][j])\n return result\n# @lc 
code=end\n\n","repo_name":"WuLC/LeetCode","sub_path":"Algorithm/Python/516.longest-palindromic-subsequence.py","file_name":"516.longest-palindromic-subsequence.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"1410070674","text":"from functools import wraps\n\ndef memoize(function):\n\tmemo = {}\n\t@wraps(function)\n\tdef wrapper(*args):\n\t\tif args in memo:\n\t\t\treturn memo[args]\n\t\telse:\n\t\t\trv = function(*args)\n\t\t\tmemo[args] = rv\n\t\t\treturn rv\n\treturn wrapper\n\n\n@memoize\ndef fibonacci(n):\n\tif n < 2:\n\t\treturn n\n\treturn fibonacci(n-1) + fibonacci(n-2)\n\ndef main():\n\tfibonacci(25)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"eastlakeside/interpy-zh","sub_path":"code/2.7/24_cache.py","file_name":"24_cache.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":6396,"dataset":"github-code","pt":"81"} +{"seq_id":"24907228749","text":"# --------------------------------------------------------------------------------------------\r\n# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License. See License.txt in the project root for license information.\r\n# --------------------------------------------------------------------------------------------\r\n# pylint: disable=line-too-long\r\n# pylint: disable=too-many-lines\r\n# pylint: disable=too-many-statements\r\n\r\nfrom azure.cli.core.commands.parameters import (\r\n tags_type,\r\n get_enum_type,\r\n get_location_type\r\n)\r\n\r\nfrom azure.cli.core.commands.validators import get_default_location_from_resource_group\r\nfrom ._validators import validate_encryption_values\r\n\r\n\r\ndef load_arguments(self, _):\r\n\r\n with self.argument_context('databricks workspace create') as c:\r\n c.argument('workspace_name', options_list=['--name', '-n'], help='The name of the workspace.')\r\n c.argument('tags', tags_type)\r\n c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)\r\n c.argument('managed_resource_group', help='The managed resource group to create. 
It can be either a name or a resource ID.')\r\n c.argument('custom_virtual_network_id', options_list=['--vnet'], arg_group='Custom VNET', help='Virtual Network name or resource ID.')\r\n c.argument('custom_public_subnet_name', options_list=['--public-subnet'], arg_group='Custom VNET', help='The name of a Public Subnet within the Virtual Network.')\r\n c.argument('custom_private_subnet_name', options_list=['--private-subnet'], arg_group='Custom VNET', help='The name of a Private Subnet within the Virtual Network.')\r\n c.argument('sku_name', options_list=['--sku'], arg_type=get_enum_type(['standard', 'premium', 'trial']), help='The SKU tier name.')\r\n c.argument('prepare_encryption', action='store_true', help='Flag to enable the Managed Identity for managed storage account to prepare for CMK encryption.')\r\n\r\n with self.argument_context('databricks workspace update') as c:\r\n c.argument('workspace_name', options_list=['--name', '-n'], id_part='name', help='The name of the workspace.')\r\n c.argument('tags', tags_type)\r\n c.argument('prepare_encryption', action='store_true', help='Flag to enable the Managed Identity for managed storage account to prepare for CMK encryption.')\r\n c.argument('encryption_key_source', options_list=['--key-source'], arg_group='Encryption', arg_type=get_enum_type(['Default', 'Microsoft.Keyvault']), validator=validate_encryption_values, help='The encryption key source (provider).')\r\n c.argument('encryption_key_name', options_list=['--key-name'], arg_group='Encryption', help='The name of KeyVault key.')\r\n c.argument('encryption_key_version', options_list=['--key-version'], arg_group='Encryption', help='The version of KeyVault key.')\r\n c.argument('encryption_key_vault', options_list=['--key-vault'], arg_group='Encryption', help='The Uri of KeyVault.')\r\n\r\n with self.argument_context('databricks workspace delete') as c:\r\n c.argument('workspace_name', options_list=['--name', '-n'], id_part='name', help='The name of the workspace.')\r\n\r\n with self.argument_context('databricks workspace show') as c:\r\n c.argument('workspace_name', options_list=['--name', '-n'], id_part='name', help='The name of the workspace.')\r\n\r\n with self.argument_context('databricks workspace list') as c:\r\n pass\r\n\r\n with self.argument_context('databricks workspace wait') as c:\r\n c.argument('workspace_name', options_list=['--name', '-n'], id_part='name', help='The name of the workspace.')\r\n","repo_name":"petertuton/azure-cli-extensions","sub_path":"src/databricks/azext_databricks/_params.py","file_name":"_params.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"21119732789","text":"\"\"\"\nTHIS IS THE EXTERNAL-ONLY VERSION OF THIS FILE. 
G3 HAS ITS OWN.\n\nThis file contains helpers for defining build flags and options that are used to\nconfigure the Skia build.\n\"\"\"\n\nload(\"@bazel_skylib//lib:selects.bzl\", _selects = \"selects\")\n\n# https://github.com/bazelbuild/bazel-skylib/blob/main/rules/common_settings.bzl\nload(\"@bazel_skylib//rules:common_settings.bzl\", \"string_flag\", skylib_bool_flag = \"bool_flag\")\n\n# Re-export other symbols from bazel_skylib for convenience\nselects = _selects\n\n# Forked from https://github.com/bazelbuild/bazel-skylib/blob/main/rules/common_settings.bzl\nBuildSettingInfo = provider(\n doc = \"A singleton provider that contains the raw value of a multi-string build setting\",\n fields = [\"values\"],\n)\n\ndef _multi_string_impl(ctx):\n allowed_values = ctx.attr.values\n values = ctx.build_setting_value\n for v in values:\n if v not in ctx.attr.values:\n fail(\"Error setting \" + str(ctx.label) + \": invalid value '\" + v + \"'. Allowed values are \" + str(allowed_values))\n return BuildSettingInfo(values = values)\n\nmulti_string_flag = rule(\n implementation = _multi_string_impl,\n # https://bazel.build/rules/lib/config#string_list\n build_setting = config.string_list(flag = True, repeatable = True),\n attrs = {\n \"values\": attr.string_list(\n doc = \"The list of allowed values for this setting. An error is raised if any other values are given.\",\n ),\n },\n doc = \"A string-typed build setting that can be set multiple times on the command line\",\n)\n\ndef string_flag_with_values(name, values, default = \"\", multiple = False):\n \"\"\"Create a string flag and corresponding config_settings.\n\n string_flag_with_values is a Bazel Macro that defines a flag with the given name and a set\n of valid values for that flag. For each value, a config_setting is defined with the name\n of the value, associated with the created flag.\n This is defined to make the BUILD.bazel file easier to read w/o the boilerplate of defining\n a string_flag rule and n config_settings\n https://docs.bazel.build/versions/main/skylark/macros.html\n\n Args:\n name: string, the name of the flag to create and use for the config_settings\n values: list of strings, the valid values for this flag to be set to.\n default: string, whatever the default value should be if the flag is not set. Can be\n empty string for both a string_flag and a multi_string flag.\n multiple: boolean, True if the flag should be able to be set multiple times on the CLI.\n \"\"\"\n if multiple:\n multi_string_flag(\n name = name,\n # We have to specify a default value, even if that value is empty string.\n # https://docs.bazel.build/versions/main/skylark/config.html#instantiating-build-settings\n build_setting_default = [default],\n # We need to make sure empty string (the default) is in the list of acceptable values.\n values = values + [\"\"],\n )\n else:\n string_flag(\n name = name,\n # We have to specify a default value, even if that value is empty string.\n # https://docs.bazel.build/versions/main/skylark/config.html#instantiating-build-settings\n build_setting_default = default,\n # We need to make sure empty string (the default) is in the list of acceptable values.\n values = values + [\"\"],\n )\n\n # For each of the values given, we define a config_setting. This allows us to use\n # select statements, on the given setting, e.g. 
referencing\n # //bazel/common_config_settings:some_valid_value_for_a_flag\n for v in values:\n native.config_setting(\n name = v,\n flag_values = {\n \":\" + name: v,\n },\n visibility = [\"//:__subpackages__\"],\n )\n\ndef bool_flag(name, default):\n \"\"\"Create a boolean flag and corresponding config_settings.\n\n bool_flag is a Bazel Macro that defines a boolean flag with the given name two config_settings,\n one for True, one for False. Reminder that Bazel has special syntax for unsetting boolean flags,\n but this does not work well with aliases.\n https://docs.bazel.build/versions/main/skylark/config.html#using-build-settings-on-the-command-line\n Thus it is best to define both an \"enabled\" alias and a \"disabled\" alias.\n\n Args:\n name: string, the name of the flag to create and use for the config_settings\n default: boolean, if the flag should default to on or off.\n \"\"\"\n\n skylib_bool_flag(name = name, build_setting_default = default)\n vis = [\"//:__subpackages__\"]\n\n native.config_setting(\n name = name + \"_true\",\n flag_values = {\n # The value must be a string, but it will be parsed to a boolean\n # https://docs.bazel.build/versions/main/skylark/config.html#build-settings-and-select\n \":\" + name: \"True\",\n },\n visibility = vis,\n )\n\n native.config_setting(\n name = name + \"_false\",\n flag_values = {\n \":\" + name: \"False\",\n },\n visibility = vis,\n )\n","repo_name":"google/skia","sub_path":"bazel/flags.bzl","file_name":"flags.bzl","file_ext":"bzl","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","stars":8112,"dataset":"github-code","pt":"81"} +{"seq_id":"72174689865","text":"import json\nimport os\nimport shutil\nimport time\nfrom collections import ChainMap\nfrom functools import partial\nfrom pathlib import Path\nfrom subprocess import check_call\nfrom typing import List\n\nHERE = Path(__file__).parent.resolve()\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"myst_parser\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx_copybutton\",\n]\n\nmyst_enable_extensions = [\"html_image\"]\nmyst_heading_anchors = 3\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The file extensions of source files.\n# Sphinx considers the files with this suffix as sources.\n# The value can be a dictionary mapping file extensions to file types.\nsource_suffix = {\".rst\": \"restructuredtext\", \".md\": \"markdown\"}\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"JupyterLab\"\ncopyright = f\"2018-{time.localtime().tm_year}, Project Jupyter\" # noqa\nauthor = \"Project Jupyter\"\n\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\n_version_py = HERE.parent.parent / \"jupyterlab\" / \"_version.py\"\nversion_ns = {}\n\nexec(_version_py.read_text(), version_ns) # noqa\n\n# The short X.Y version.\nversion = \"{0:d}.{1:d}\".format(*version_ns[\"version_info\"]) # noqa\n# The full version, including alpha/beta/rc tags.\nrelease = version_ns[\"__version__\"]\n\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\nlanguage = \"en\" # Must be set from the command line to generate various languages\n\nlocale_dirs = [\"locale/\"]\ngettext_compact = False\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# build js docs and stage them to the build directory\ndef build_api_docs(out_dir: Path):\n \"\"\"build js api docs\"\"\"\n docs = HERE.parent\n root = docs.parent\n docs_api = docs / \"source\" / \"api\"\n api_index = docs_api / \"index.html\"\n # is this an okay way to specify jlpm\n # without installing jupyterlab first?\n jlpm = [\"node\", str(root / \"jupyterlab\" / \"staging\" / \"yarn.js\")]\n\n if api_index.exists():\n # avoid rebuilding docs because it takes forever\n # `make clean` to force a rebuild\n pass\n else:\n check_call(jlpm, cwd=str(root)) # noqa S603\n check_call([*jlpm, \"build:packages\"], cwd=str(root)) # noqa S603\n check_call([*jlpm, \"docs\"], cwd=str(root)) # noqa S603\n\n dest_dir = out_dir / \"api\"\n if dest_dir.exists():\n shutil.rmtree(str(dest_dir))\n shutil.copytree(str(docs_api), str(dest_dir))\n\n\n# Copy frontend files for snippet inclusion\nFILES_LIST = [ # File paths should be relative to jupyterlab root folder\n \"packages/settingregistry/src/plugin-schema.json\"\n]\nSNIPPETS_FOLDER = \"snippets\"\n\n\ndef copy_code_files(temp_folder: Path):\n \"\"\"Copy files in the temp_folder\"\"\"\n docs = HERE.parent\n root = docs.parent\n\n for file in FILES_LIST:\n target = temp_folder / file\n if not target.parent.exists():\n target.parent.mkdir(parents=True, exist_ok=True)\n shutil.copyfile(str(root / file), str(target))\n\n # Split plugin schema to ease documentation maintenance\n if file == \"packages/settingregistry/src/plugin-schema.json\":\n 
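# Each top-level key from the schema's \"definitions\" and \"properties\" maps is\n # written to its own <key>.json fragment so the docs can include them individually.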
schema = json.loads(Path(target).read_text())\n\n partial_schema = ChainMap(schema.get(\"definitions\", {}), schema.get(\"properties\", {}))\n for key in partial_schema:\n fragment = target.parent / f\"{key}.json\"\n with fragment.open(\"w\") as f:\n json.dump({key: partial_schema[key]}, f, indent=2)\n\n\nIMAGES_FOLDER = \"images\"\nAUTOMATED_SCREENSHOTS_FOLDER = \"galata/test/documentation\"\n\n\ndef copy_automated_screenshots(temp_folder: Path) -> List[Path]:\n \"\"\"Copy PlayWright automated screenshots in documentation folder.\n\n Args:\n temp_folder: Target directory in which to copy the file\n Returns:\n List of copied files\n \"\"\"\n docs = HERE.parent\n root = docs.parent\n\n src = root / AUTOMATED_SCREENSHOTS_FOLDER\n\n copied_files = []\n for img in src.rglob(\"*.png\"):\n target = temp_folder / (img.name.replace(\"-documentation-linux\", \"\"))\n shutil.copyfile(str(img), str(target))\n copied_files.append(target)\n\n return copied_files\n\n\nCOMMANDS_LIST_PATH = \"commands.test.ts-snapshots/commandsList-documentation-linux.json\"\nCOMMANDS_LIST_DOC = \"user/commands_list.md\"\nPLUGINS_LIST_PATH = \"plugins.test.ts-snapshots/plugins-documentation-linux.json\"\nPLUGINS_LIST_DOC = \"extension/plugins_list.rst\"\nTOKENS_LIST_PATH = \"plugins.test.ts-snapshots/tokens-documentation-linux.json\"\nTOKENS_LIST_DOC = \"extension/tokens_list.rst\"\n\n\ndef document_commands_list(temp_folder: Path) -> None:\n \"\"\"Generate the command list documentation page from application extraction.\"\"\"\n list_path = HERE.parent.parent / AUTOMATED_SCREENSHOTS_FOLDER / COMMANDS_LIST_PATH\n\n commands_list = json.loads(list_path.read_text())\n\n template = \"\"\"| Command id | Label | Shortcuts |\n| ---------- | ----- | --------- |\n\"\"\"\n\n for command in sorted(commands_list, key=lambda c: c[\"id\"]):\n for key in (\"id\", \"label\", \"caption\"):\n if key not in command:\n command[key] = \"\"\n else:\n command[key] = command[key].replace(\"\\n\", \" \")\n shortcuts = command.get(\"shortcuts\", [])\n command[\"shortcuts\"] = (\n \"\" + \", \".join(shortcuts) + \"\" if len(shortcuts) else \"\"\n )\n\n template += \"| `{id}` | {label} | {shortcuts} |\\n\".format(**command)\n\n (temp_folder / COMMANDS_LIST_DOC).write_text(template)\n\n\ndef document_plugins_tokens_list(list_path: Path, output_path: Path) -> None:\n \"\"\"Generate the plugins list documentation page from application extraction.\"\"\"\n items = json.loads(list_path.read_text())\n\n template = \"\"\n\n for _name, _description in items.items():\n template += f\"- ``{_name}``: {_description}\\n\"\n\n output_path.write_text(template)\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pydata_sphinx_theme\"\nhtml_logo = \"_static/logo-rectangle.svg\"\nhtml_favicon = \"_static/logo-icon.png\"\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"icon_links\": [\n {\n \"name\": \"jupyter.org\",\n \"url\": \"https://jupyter.org\",\n \"icon\": \"_static/jupyter_logo.svg\",\n \"type\": \"local\",\n },\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/jupyterlab/jupyterlab\",\n \"icon\": \"fab fa-github-square\",\n },\n {\n \"name\": \"Discourse\",\n \"url\": \"https://discourse.jupyter.org/c/jupyterlab/17\",\n \"icon\": \"fab fa-discourse\",\n },\n {\n \"name\": \"Gitter\",\n \"url\": \"https://gitter.im/jupyterlab/jupyterlab\",\n \"icon\": \"fab fa-gitter\",\n },\n ],\n \"logo\": {\n \"image_light\": \"_static/logo-rectangle.svg\",\n \"image_dark\": \"_static/logo-rectangle-dark.svg\",\n \"alt_text\": \"JupyterLab\",\n },\n \"use_edit_page_button\": True,\n \"navbar_align\": \"left\",\n \"navbar_start\": [\"navbar-logo\", \"version-switcher\"],\n \"footer_start\": [\"copyright.html\"],\n \"switcher\": {\n # Trick to get the documentation version switcher to always points to the latest version without being corrected by the integrity check;\n # otherwise older versions won't list newer versions\n \"json_url\": \"/\".join(\n (\"https://jupyterlab.readthedocs.io/en\", \"latest\", \"_static/switcher.json\")\n ),\n \"version_match\": os.environ.get(\"READTHEDOCS_VERSION\", \"latest\"),\n },\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n \"index\": [], # Home page has no sidebar so there's more room for content\n \"**\": [\"sidebar-nav-bs.html\"],\n}\n\n# Output for github to be used in links\nhtml_context = {\n \"github_user\": \"jupyterlab\", # Username\n \"github_repo\": \"jupyterlab\", # Repo name\n \"github_version\": \"main\", # Version\n \"doc_path\": \"docs/source/\", # Path in the checkout to the docs root\n}\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"JupyterLabdoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"JupyterLab.tex\",\n \"JupyterLab Documentation\",\n \"Project Jupyter\",\n \"manual\",\n ),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"jupyterlab\", \"JupyterLab Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"JupyterLab\",\n \"JupyterLab Documentation\",\n author,\n \"JupyterLab\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\n\n\ndef setup(app):\n dest = HERE / \"getting_started/changelog.md\"\n shutil.copy(str(HERE.parent.parent / \"CHANGELOG.md\"), str(dest))\n app.add_css_file(\"css/custom.css\") # may also be an URL\n # Skip we are dealing with internationalization\n outdir = Path(app.outdir)\n if outdir.name != \"gettext\":\n build_api_docs(outdir)\n\n copy_code_files(Path(app.srcdir) / SNIPPETS_FOLDER)\n tmp_files = copy_automated_screenshots(Path(app.srcdir) / IMAGES_FOLDER)\n\n def clean_code_files(tmp_files, app, exception):\n \"\"\"Remove temporary folder.\"\"\"\n try:\n shutil.rmtree(str(Path(app.srcdir) / SNIPPETS_FOLDER))\n except Exception: # noqa S110\n pass\n\n for f in tmp_files:\n f.unlink()\n\n src_dir = Path(app.srcdir)\n document_commands_list(src_dir)\n document_plugins_tokens_list(\n HERE.parent.parent / AUTOMATED_SCREENSHOTS_FOLDER / PLUGINS_LIST_PATH,\n src_dir / PLUGINS_LIST_DOC,\n )\n document_plugins_tokens_list(\n HERE.parent.parent / AUTOMATED_SCREENSHOTS_FOLDER / TOKENS_LIST_PATH,\n src_dir / TOKENS_LIST_DOC,\n )\n\n app.connect(\"build-finished\", partial(clean_code_files, tmp_files))\n","repo_name":"jupyterlab/jupyterlab","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":13046,"program_lang":"python","lang":"en","doc_type":"code","stars":13417,"dataset":"github-code","pt":"81"} +{"seq_id":"19465877830","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 13 20:31:33 2021\r\n\r\n@author: annice\r\n\r\nDescription: According to the Vision Council, 75% of adults use glasses \r\nor contact lenses. Staring at computer, smartphone and other digital screens \r\nis known to play an important factor in permanently damaging you eyes and \r\nincreasing the need to use corrective glasses or contact lenses. It is \r\nsuggested that after looking at digital screens for 20 minutes you take a 20 \r\nsecond break to look 20 meters away. \r\nIn the following program we make an application that warns you every 20 mins\r\nto look 20 meters away for 20 secs. The application lets you know when 20 mins\r\nor 20 secs has passed by showing a message using the messagebox module. \r\n\r\nInstruction: \r\n I) First create the UI. 
Look at the file provided for the \r\nUI and head over to the UI section below and use tkinter to build a similar UI.\r\n II) write a function for starting the timer.\r\n III) write a function for counting down and a function to reset the timer\r\n\r\n\"\"\"\r\n\r\n# ---------------------------- Use the modules imported below to write your program ------------------------------- #\r\n\r\nimport math\r\nimport tkinter\r\nfrom tkinter import messagebox\r\n\r\n# ---------------------------- Use the constants below in your program ------------------------------- #\r\n#The color scheme has been defined for you below. Feel free to play around \r\n#with it later. You can explore other color schemes on colorhunt.co\r\n\r\nTEAL = \"#79A3B1\"\r\nLIGHTTEAL = \"#D0E8F2\"\r\nDARKTEAL = \"#456268\"\r\nBEIGE = \"#FCF8EC\"\r\nGREEN = \"#7FC8A9\"\r\nFONT_NAME = \"Atkinson\"\r\nWORK_MIN = 1\r\nSHORT_BREAK_SEC = 20\r\n# ---------------------------- Variables ------------------------------- #\r\nreps=0\r\ntimer = None\r\n\r\n# ---------------------------- write a function for resetting timer below ------------------------------- # \r\ndef reset_timer():\r\n '''\r\n Returns nothing. Changes the text shown on the top to \"Timer\" and sets the \r\n timer equal to 00:00.\r\n \r\n '''\r\n return 0\r\n \r\n# ---------------------------- write a function to start the timer below------------------------------- # \r\ndef start_btn_clicked():\r\n '''\r\n Returns nothing. Changes the text shown on the top to either \"Work\" or \r\n \"Break! Look Away\" and calls the count_down function.\r\n \r\n '''\r\n return 0\r\n\r\n# ---------------------------- write a function for counting down ------------------------------- # \r\ndef count_down(count):\r\n '''\r\n Returns nothing. Changes the text showing the time remaining. 
\r\n\r\n Parameters:\r\n count (int):The string which is to be reversed.\r\n\r\n '''\r\n return 0\r\n\r\n# ---------------------------- UI ------------------------------- #\r\nwindow = tkinter.Tk()\r\n\r\nwindow.mainloop()\r\n\r\n ","repo_name":"TAMU-BMEN207/202020_Rule_App","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29215064602","text":"import json\nimport requests\nimport ntpath\nSAVE_FILE_PATH = \"saves/save.json\"\nPRINT_FILE_PATH = \"data.js\"\nGRID_FILE_PATH = \"grid.html\"\n\n\ndef load(filename=None):\n global SAVE_FILE_PATH\n if type(filename) is str and filename:\n print(\"writing filename: \" + filename)\n SAVE_FILE_PATH = filename\n try:\n file = open(SAVE_FILE_PATH, \"r\")\n save_str = file.read()\n save_obj = json.loads(save_str)\n file.close()\n return save_obj\n except:\n return None\n\ndef getSaveFile():\n return ntpath.basename(SAVE_FILE_PATH)\n\ndef getFullSaveFilePath():\n return SAVE_FILE_PATH\n\ndef save(frames, names, sequence_name, filename=None):\n global SAVE_FILE_PATH\n if type(filename) is str and filename:\n print(\"writing filename: \" + filename)\n SAVE_FILE_PATH = filename\n save = dict()\n save[\"frames\"] = frames\n save[\"names\"] = names\n save[\"sequence\"] = sequence_name\n sequence = frames[sequence_name]\n save[\"rows\"] = len(sequence[0])\n save[\"cols\"] = len(sequence[0][0])\n file = open(SAVE_FILE_PATH, \"w\")\n file.write(json.dumps(save))\n file.close()\n\ndef printString(string):\n file = open(PRINT_FILE_PATH, \"w\")\n file.write(string)\n file.close()\n\ndef printArray(ra):\n TEMPLATE = \"const allFrames = %s\"\n file = open(PRINT_FILE_PATH, \"w\")\n out = {\"data\": TEMPLATE % json.dumps(ra)};\n file.write(out[\"data\"])\n file.close()\n requests.post(\"https://umbrella-script-php.joew3947.repl.co/update.php\", data=out)\n\ndef printGrid(names):\n out = \"\"\n for x in names:\n out += \"\"\n for y in x:\n out += \"\"\n out += \"\"\n out += \"
    \" + y + \"
    \"\n out += \"\"\n out += \"\"\n file = open(GRID_FILE_PATH, \"w\")\n file.write(out)\n file.close()","repo_name":"JoeWildfong/Umbrella-Planner","sub_path":"files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38842905468","text":"from tkinter import ttk\nfrom tkinter import * \n\nimport sqlite3\n\nclass Product: \n\n dbName = 'database.db'\n\n def __init__(self, window):\n self.wind = window\n self.wind.title('Products Application')\n\n #Creating a Frame Container\n frame = LabelFrame(self.wind, text = 'Register a new Product')\n frame.grid(row = 0, column = 0, columnspan = 3, pady = 20)\n\n #Name Input\n Label(frame, text = 'Name: ').grid(row = 1, column = 0)\n self.name = Entry(frame)\n self.name.focus()\n self.name.grid(row = 1, column = 1)\n\n #Price Input\n Label(frame, text = 'Price: ').grid(row = 2, column = 0)\n self.price = Entry(frame)\n self.price.grid(row = 2, column = 1)\n\n #Output Messages\n self.message = Label(text = '', fg = 'red')\n self.message.grid(row = 3, column = 0, columnspan = 2, sticky = W + E)\n\n #Table \n self.tree = ttk.Treeview(height = 10, columns = 2)\n self.tree.grid(row = 4, column = 0, columnspan = 2)\n self.tree.heading('#0', text = 'Name', anchor = CENTER)\n self.tree.heading('#1', text = 'Price', anchor = CENTER)\n \n #Button Add Product\n ttk.Button(frame, text = \"Save Product\", command = self.AddProduct).grid(row = 3, columnspan = 2, sticky = W + E)\n\n #Button Delete Product\n ttk.Button(text = 'DELETE', command = self.DeleteProduct).grid(row = 5, column = 0, sticky = W + E)\n \n #Button Edit Product\n ttk.Button(text = 'EDIT', command = self.EditProduct).grid(row = 5, column = 1, sticky = W + E)\n\n #Filling the Rows\n self.GetProducts()\n\n \n def RunQuery(self, query, parameters = ()):\n #Interact with DB\n with sqlite3.connect(self.dbName) as conn: \n cursor = conn.cursor()\n result = cursor.execute(query, parameters)\n conn.commit()\n return result\n\n def Validation(self):\n return len(self.name.get()) != 0 and len(self.price.get()) != 0\n\n###########################################################################################################################################\n###############################################################---CRUD---##################################################################\n###########################################################################################################################################\n\n def GetProducts(self): \n #Cleaning Table\n \n records = self.tree.get_children()\n for element in records:\n self.tree.delete(element)\n \n #Querying Data\n\n query = 'SELECT * FROM product ORDER BY name DESC'\n dbRows = self.RunQuery(query)\n for row in dbRows:\n self.tree.insert('', 0, text = row[1], value = row[2])\n\n def AddProduct(self): \n if self.Validation(): \n query = 'INSERT INTO product VALUES(NULL, ?, ?)'\n parameters = (self.name.get(), self.price.get())\n self.RunQuery(query, parameters)\n self.message['text'] = 'Product {} added Succesfully'.format(self.name.get())\n self.name.delete(0, END)\n self.price.delete(0, END)\n else: \n self.message['text'] = 'Name or Price Required'\n self.GetProducts()\n\n def DeleteProduct(self):\n self.message['text'] = ''\n try:\n self.tree.item(self.tree.selection())['text'][0]\n except IndexError as e:\n self.message['text'] = 'Please Select a Record'\n print(e)\n return\n self.message['text'] = ''\n 
name = self.tree.item(self.tree.selection())['text']\n query = 'DELETE FROM product WHERE name = ?'\n self.RunQuery(query, (name, ))\n self.message['text'] = 'Record {} deleted Succesfully'.format(name)\n self.GetProducts()\n\n def EditProduct(self):\n self.message['text'] = ''\n try:\n self.tree.item(self.tree.selection())['text'][0]\n except IndexError as e:\n self.message['text'] = 'Please Select a Record'\n print(e)\n return\n name = self.tree.item(self.tree.selection())['text']\n oldPrice = self.tree.item(self.tree.selection())['values'][0]\n self.editWind = Toplevel()\n self.editWind.title = 'Edit Product'\n\n #Old Name\n Label(self.editWind, text = 'Old Name: ').grid(row = 0, column = 1)\n Entry(self.editWind, textvariable = StringVar(self.editWind, value = name), state = 'readonly').grid(row = 0, column = 2)\n\n #New Name\n Label(self.editWind, text = 'New Name: ').grid(row = 1, column = 1)\n newName = Entry(self.editWind)\n newName.grid(row = 1, column = 2)\n\n #Old Price\n Label(self.editWind, text = 'Old Price: ').grid(row = 2, column = 1)\n Entry(self.editWind, textvariable= StringVar(self.editWind, value = oldPrice), state = 'readonly').grid(row = 2, column = 2)\n\n #New Price\n Label(self.editWind, text = 'New Price: ').grid(row = 3, column = 1)\n newPrice = Entry(self.editWind)\n newPrice.grid(row = 3, column = 2)\n\n #Update Button\n Button(self.editWind, text = 'Update', command = lambda: self.EditRecords(newName.get(), name, newPrice.get(), oldPrice)).grid(row = 4, column = 2, sticky = W)\n\n def EditRecords(self, newName, name, newPrice, oldPrice): \n query = 'UPDATE product SET name = ?, price = ? WHERE name = ? AND price = ?' \n parameters = (newName, newPrice, name, oldPrice)\n self.RunQuery(query, parameters)\n self.editWind.destroy()\n self.message['text'] = 'Record {} updated Successfully'.format(name)\n self.GetProducts()\n\nif __name__ == '__main__':\n window = Tk()\n application = Product(window)\n window.mainloop() ","repo_name":"bonino97/CRUD-Tkinter","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71690461066","text":"import keys\nimport tweepy\nimport json\nimport requests\nimport time\nfrom functions import *\n\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\ndef check_id_in_file(id):\n with open('replied_tweets.txt' , 'r') as read_obj:\n for line in read_obj:\n if(id in line):\n return True\n return False\n\n\ndef main():\n mentions = api.mentions_timeline()\n myFile = open('replied_tweets.txt' , 'a')\n for mention in mentions:\n mention_id = str(mention.id)\n if(('#whatpokemonami' in mention.text) and (check_id_in_file(mention_id) == False)):\n myFile.write(str(mention.id))\n myFile.write(\"\\n\")\n pokemon = get_pokemon(mention.text)\n tweet_status = (\"@\" + mention.user.screen_name + \" \" + \"Based on your birthday you are: \\n\" + \"NAME: \"\n + pokemon['name'] + \"\\nPOKEDEX ENTRY: \" + str(pokemon['index']) + \"\\nTYPES: \" + pokemon['type'])\n file = \"pokemon.png\"\n status = api.update_with_media(file, tweet_status)\n print(\"Replied to @\" + mention.user.screen_name + \"\\n\")\n\n\n\n\nwhile True:\n main()\n print(\"Waiting for Tweet\")\n 
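# simple polling loop: check mentions, then wait a few seconds before the next pass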
time.sleep(5)\n","repo_name":"kylejava/SharkHacks","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"38416954728","text":"import torch\nfrom torch import nn\n\n# Basic convolution block with BatchNorm\nclass BasicConv2d_BN(nn.Module):\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d_BN, self).__init__() # fixed boilerplate structure\n self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels)\n self.leaky_relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.leaky_relu(x)\n return x\n\n\nclass BasicConv2d(nn.Module):\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.relu(x)\n return x\n\n\nclass Unet1(nn.Module):\n def __init__(self):\n super(Unet1, self).__init__()\n\n # Sequential ties the two convolution layers together\n self.branch1 = nn.Sequential(\n BasicConv2d_BN(3, 8, kernel_size=3, padding=1),\n BasicConv2d_BN(8, 8, kernel_size=3, padding=1)\n )\n self.maxpool = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)\n self.branch2 = nn.Sequential(\n BasicConv2d_BN(8, 16, kernel_size=3, padding=1),\n BasicConv2d_BN(16, 16, kernel_size=3, padding=1)\n )\n self.branch3 = nn.Sequential(\n BasicConv2d_BN(16, 32, kernel_size=3, padding=1),\n BasicConv2d_BN(32, 32, kernel_size=3, padding=1)\n )\n\n self.branch4 = nn.Sequential(\n BasicConv2d_BN(32, 64, kernel_size=3, padding=1),\n BasicConv2d_BN(64, 64, kernel_size=3, padding=1)\n )\n self.branch5 = nn.Sequential(\n BasicConv2d_BN(64, 128, kernel_size=3, padding=1),\n nn.Dropout(0.5),\n BasicConv2d_BN(128, 128, kernel_size=3, padding=1),\n nn.Dropout(0.5)\n )\n\n self.convt1_1 = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='nearest'),\n BasicConv2d(128, 64, kernel_size=3, stride=1, padding=1)\n )\n\n self.Dropout_1 = nn.Dropout(0.5)\n self.convt1_2 = BasicConv2d_BN(128, 64, kernel_size=3, padding=1)\n self.convt1_3 = BasicConv2d_BN(64, 64, kernel_size=3, padding=1)\n\n self.convt2_1 = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='nearest'),\n BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)\n )\n # self.convt2_1 = BasicConv2dT_BN(64, 32, kernel_size=3,stride=2,padding=1)\n\n self.convt2_2 = BasicConv2d_BN(64, 32, kernel_size=3, padding=1)\n self.convt2_3 = BasicConv2d_BN(32, 32, kernel_size=3, padding=1)\n\n self.convt3_1 = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='nearest'),\n BasicConv2d(32, 16, kernel_size=3, stride=1, padding=1)\n )\n # self.convt3_1 = BasicConv2dT_BN(32, 16, kernel_size=3, stride=2,padding=1)\n\n self.convt3_2 = BasicConv2d_BN(32, 16, kernel_size=3, padding=1)\n self.convt3_3 = BasicConv2d_BN(16, 16, kernel_size=3, padding=1)\n\n # self.convt4_1 = BasicConv2dT_BN(16, 8, kernel_size=3, stride=2,padding=1)\n self.convt4_1 = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='nearest'),\n BasicConv2d(16, 8, kernel_size=3, stride=1, padding=1)\n )\n self.convt4_2 = BasicConv2d_BN(16, 8, kernel_size=3, padding=1)\n self.convt4_3 = BasicConv2d_BN(8, 8, kernel_size=3, padding=1)\n\n self.conv5 = BasicConv2d(8, 3, kernel_size=1, stride=1, padding=0)\n\n def forward(self, x):\n
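 # Shape walk-through (illustrative, assuming a 3x256x256 input): x1 is 8x256x256, x11 8x128x128, x2 16x128x128, x22 16x64x64, x3 32x64x64, x33 32x32x32, x4 64x32x32, x44 64x16x16 and x5 128x16x16; the decoder then upsamples step by step, concatenating the saved xN tensors as skip connections.\n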
 x1 = self.branch1(x)\n x11 = self.maxpool(x1)\n\n x2 = self.branch2(x11)\n x22 = self.maxpool(x2)\n\n x3 = self.branch3(x22)\n x33 = self.maxpool(x3)\n\n x4 = self.branch4(x33)\n x44 = self.maxpool(x4)\n\n x5 = self.branch5(x44)\n\n x = self.convt1_1(x5)\n x = torch.cat([x4, x], 1) # feature fusion: concatenate along the channel dimension (dim 1)\n x = self.convt1_2(x)\n x = self.convt1_3(x)\n\n x = self.convt2_1(x)\n x = torch.cat([x3, x], 1)\n x = self.Dropout_1(x)\n x = self.convt2_2(x)\n x = self.convt2_3(x)\n\n x = self.convt3_1(x)\n x = torch.cat([x2, x], 1)\n x = self.Dropout_1(x)\n x = self.convt3_2(x)\n x = self.convt3_3(x)\n\n x = self.convt4_1(x)\n x = torch.cat([x1, x], 1)\n x = self.Dropout_1(x)\n x = self.convt4_2(x)\n x = self.convt4_3(x)\n\n x = self.Dropout_1(x)\n x = self.conv5(x)\n\n return x\n\n","repo_name":"LeoLuo0115/MachineLearning_LicensePlateDetation_Project-","sub_path":"Unet1.py","file_name":"Unet1.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"32421993829","text":"from django.db import transaction\nfrom django.http import JsonResponse\nfrom django.templatetags.static import static\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer\n\nfrom .geo_services import geocoding\nfrom .models import Order, OrderProduct, Product\n\n\ndef banners_list_api(request):\n # FIXME move data to db?\n return JsonResponse([\n {\n 'title': 'Burger',\n 'src': static('burger.jpg'),\n 'text': 'Tasty Burger at your door step',\n },\n {\n 'title': 'Spices',\n 'src': static('food.jpg'),\n 'text': 'All Cuisines',\n },\n {\n 'title': 'New York',\n 'src': static('tasty.jpg'),\n 'text': 'Food is incomplete without a tasty dessert',\n }\n ], safe=False, json_dumps_params={\n 'ensure_ascii': False,\n 'indent': 4,\n })\n\n\ndef product_list_api(request):\n products = Product.objects.select_related('category').available()\n\n dumped_products = []\n for product in products:\n dumped_product = {\n 'id': product.id,\n 'name': product.name,\n 'price': product.price,\n 'special_status': product.special_status,\n 'description': product.description,\n 'category': {\n 'id': product.category.id,\n 'name': product.category.name,\n },\n 'image': product.image.url,\n 'restaurant': {\n 'id': product.id,\n 'name': product.name,\n }\n }\n dumped_products.append(dumped_product)\n return JsonResponse(dumped_products, safe=False, json_dumps_params={\n 'ensure_ascii': False,\n 'indent': 4,\n })\n\n\nclass OrderProductSerializer(ModelSerializer):\n class Meta:\n model = OrderProduct\n fields = ['product', 'quantity']\n\n\nclass OrderSerializer(ModelSerializer):\n products = OrderProductSerializer(many=True, write_only=True, allow_empty=False)\n\n class Meta:\n model = Order\n fields = ['id', 'products', 'firstname', 'lastname', 'phonenumber', 'address']\n\n\n@transaction.atomic\n@api_view(['POST'])\ndef register_order(request):\n serializer = OrderSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n order = Order.objects.create(\n firstname=serializer.validated_data['firstname'],\n lastname=serializer.validated_data['lastname'],\n phonenumber=serializer.validated_data['phonenumber'],\n address=serializer.validated_data['address'],\n )\n\n products_fields = serializer.validated_data['products']\n\n for order_item in products_fields:\n OrderProduct.objects.create(\n order=order,\n product=order_item['product'],\n quantity=order_item['quantity'],\n price=order_item['product'].price\n )\n\n response = OrderSerializer(order)\n\n 
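 # geocoding() is called purely for its side effect here; its return value is discarded and only the freshly created order is serialized back (see geo_services for what it does with the address).\n 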
geocoding(serializer.validated_data['address'])\n\n return Response(response.data, status=status.HTTP_201_CREATED)\n","repo_name":"tumkir/dvmn_burger_delivery","sub_path":"foodcartapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"8007927743","text":"import torch\nimport numpy as np\nfrom train import load_model_predict\nfrom csv2npz import parse_csv\n\n\ndef main(args):\n cuda = not args.no_cuda and torch.cuda.is_available()\n\n with torch.no_grad():\n model, x_norm, y_norm = load_model_predict(args.model, cuda)\n # note: \",\".join(myList) turns a list of strings into one string\n X = np.clip(parse_csv(open(args.input), header=1), 0, x_norm) / x_norm\n X = torch.from_numpy(np.expand_dims(np.expand_dims(X, 0), 0)).float()\n\n if cuda:\n X = X.cuda()\n\n Y, _ = model(X)\n\n if cuda:\n Y = Y.cpu()\n\n arr = np.squeeze(Y.data.numpy()) * y_norm\n print(arr.shape)\n np.savetxt(args.output, arr, delimiter=\",\")\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='CAE Encoder')\n\n # data options\n parser.add_argument('--input', type=str, required=True, help='input csv files, separated by comma')\n parser.add_argument('--model', type=str, required=True, help='model file')\n parser.add_argument('--output', type=str, required=True, help='output csv file')\n\n # settings\n parser.add_argument('--no_cuda', action='store_true', default=False,\n help='disable CUDA')\n main(parser.parse_args())\n","repo_name":"FangcaoXu/AutoEncoder_Target-Detection_LWIR","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"3219787004","text":"from typing import List, Union\nfrom django.db import transaction\nfrom django.apps import apps\nfrom ....tools import singleton\nfrom ..interfaces import IActivity_DAO\n\n\nActivity = apps.get_model('main_app', 'Activity')\n\n@singleton\nclass MActivity_DAO(IActivity_DAO):\n def select_all(self) -> List[Activity]:\n activities = Activity.objects.all()\n return activities if activities else False \n \n def find_by_id(self, id : int) -> Union[Activity, bool]:\n activity = Activity.objects.filter(activity_id=id).first()\n return activity if activity else False\n \n def find_by_name(self, name : str) -> Union[Activity, bool]:\n activity = Activity.objects.filter(name_activity=name).first()\n return activity if activity else False \n \n def insert(self, activities : List[Activity]):\n with transaction.atomic():\n Activity.objects.bulk_create(activities)\n \n def update(self, activities : List[Activity]):\n with transaction.atomic():\n for activity in activities:\n Activity.objects.filter(activity_id=activity.activity_id).update(\n name_activity=activity.name_activity,\n category_id=activity.category.category_id,\n )\n \n def delete(self, activities : List[Activity]):\n with transaction.atomic():\n for activity in activities:\n Activity.objects.filter(activity_id=activity.activity_id).delete()","repo_name":"n1mets/djangoukd","sub_path":"time_tracking/main_app/models/dao/mysql_impl/mactivity_dao.py","file_name":"mactivity_dao.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"35654112136","text":"from unittest.mock import patch\n\nimport pytest\nfrom 
botocore.exceptions import ClientError\n\nimport queue_listener\n\n\n# TRUTHY\n@patch(\n \"queue_listener.s3_client.head_object\",\n return_value={\n \"ResponseMetadata\": {\"HTTPHeaders\": {\"x-amz-meta-pdfsource\": \"custom-pdfs\"}}\n },\n)\ndef test_would_replace_is_custom(head_object):\n \"\"\"There is a pdfsource, but it is custom-pdfs\"\"\"\n assert queue_listener.would_replace_custom_pdf(\"\", \"\")\n head_object.assert_called_once()\n\n\n# FALSEY\n@patch(\n \"queue_listener.s3_client.head_object\",\n return_value={\n \"ResponseMetadata\": {\"HTTPHeaders\": {\"x-amz-meta-pdfsource\": \"kitten\"}}\n },\n)\ndef test_would_replace_not_custom(head_object):\n \"\"\"There is a pdfsource, but it isn't custom-pdfs\"\"\"\n assert not queue_listener.would_replace_custom_pdf(\"\", \"\")\n head_object.assert_called_once()\n\n\n@patch(\"queue_listener.s3_client.head_object\", return_value={})\ndef test_would_replace_is_empty(head_object):\n \"\"\"There is a file, but no pdfsource header at all\"\"\"\n assert not queue_listener.would_replace_custom_pdf(\"\", \"\")\n head_object.assert_called_once()\n\n\n@patch(\n \"queue_listener.s3_client.head_object\",\n side_effect=ClientError(\n error_response={\"Error\": {\"Message\": \"Not Found\"}}, operation_name=\"\"\n ),\n)\ndef test_would_replace_is_not_found(head_object):\n \"\"\"There is no such file, so there's nothing to be worried about overwriting\"\"\"\n assert not queue_listener.would_replace_custom_pdf(\"\", \"\")\n head_object.assert_called_once()\n\n\n# ERRORY\n@patch(\n \"queue_listener.s3_client.head_object\",\n side_effect=ClientError(\n error_response={\"Error\": {\"Message\": \"Out Of Cheese\"}}, operation_name=\"\"\n ),\n)\ndef test_would_replace_is_bad_response(head_object):\n \"\"\"An unexpected error occurred, so we re-raise\"\"\"\n with pytest.raises(ClientError):\n queue_listener.would_replace_custom_pdf(\"\", \"\")\n head_object.assert_called_once()\n","repo_name":"nationalarchives/ds-caselaw-pdf-conversion","sub_path":"queue_listener/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3955493766","text":"def largestPrimeFactor(n):\n i = 2\n while i < n:\n while n % i == 0:\n n//= i\n i += 1\n return n\n\ndef main():\n for i in range(20):\n print(i, largestPrimeFactor(i))\n print(largestPrimeFactor(600851475143))\n\nif __name__ == '__main__':\n main()","repo_name":"pvillano/projecteuler","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41116116787","text":"# Licensed under an MIT open source license - see LICENSE\nfrom __future__ import print_function, absolute_import, division\n\nimport numpy as np\nimport statsmodels.api as sm\nimport warnings\nimport astropy.units as u\nfrom numpy.fft import fftshift\n\nfrom .lm_seg import Lm_Seg\nfrom .psds import pspec, make_radial_freq_arrays\nfrom .fitting_utils import clip_func, residual_bootstrap\nfrom .elliptical_powerlaw import (fit_elliptical_powerlaw,\n inverse_interval_transform,\n inverse_interval_transform_stderr)\nfrom .apodizing_kernels import *\nfrom .rfft_to_fft import rfft_to_fft\n\n\nclass StatisticBase_PSpec2D(object):\n \"\"\"\n Common features shared by 2D power spectrum methods.\n \"\"\"\n\n @property\n def ps2D(self):\n '''\n Two-dimensional power spectrum.\n '''\n return self._ps2D[::-1]\n\n 
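 # Note: the stored array is flipped along its first axis ([::-1]), presumably so the returned orientation matches what the plotting code in plot_fit assumes.\n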
@property\n def ps1D(self):\n '''\n One-dimensional power spectrum.\n '''\n return self._ps1D\n\n @property\n def ps1D_stddev(self):\n '''\n 1-sigma standard deviation of the 1D power spectrum.\n '''\n\n return self._ps1D_stddev\n\n @property\n def freqs(self):\n '''\n Corresponding spatial frequencies of the 1D power spectrum.\n '''\n return self._freqs\n\n @property\n def wavenumbers(self):\n return self._freqs * min(self._ps2D.shape)\n\n def compute_beam_pspec(self):\n '''\n Compute the power spectrum of the beam element.\n '''\n if not hasattr(self, '_beam'):\n raise AttributeError(\"Beam correction cannot be applied since\"\n \" no beam object was given.\")\n\n beam_kern = self._beam.as_kernel(self._ang_size,\n y_size=self._ps2D.shape[0],\n x_size=self._ps2D.shape[1])\n\n beam_fft = fftshift(rfft_to_fft(beam_kern.array))\n\n self._beam_pow = np.abs(beam_fft**2)\n\n # Avoid infs when dividing out by the beam power spectrum\n self._beam_pow[self._beam_pow == 0.0] = np.NaN\n\n def compute_radial_pspec(self, logspacing=False, max_bin=None, **kwargs):\n '''\n Computes the radially averaged power spectrum.\n\n Parameters\n ----------\n logspacing : bool, optional\n Return logarithmically spaced bins for the lags.\n max_bin : float, optional\n Maximum spatial frequency to bin values at.\n kwargs : passed to `~turbustat.statistics.psds.pspec`.\n '''\n\n # Check if azimuthal constraints are given\n if \"theta_0\" in kwargs:\n azim_constraint_flag = True\n else:\n azim_constraint_flag = False\n\n out = pspec(self.ps2D, return_stddev=True,\n logspacing=logspacing, max_bin=max_bin, **kwargs)\n\n self._azim_constraint_flag = azim_constraint_flag\n\n if azim_constraint_flag:\n self._freqs, self._ps1D, self._ps1D_stddev, self._azim_mask = out\n else:\n self._freqs, self._ps1D, self._ps1D_stddev = out\n\n # Attach units to freqs\n self._freqs = self.freqs / u.pix\n\n def fit_pspec(self, fit_unbinned=False,\n brk=None, log_break=False, low_cut=None,\n high_cut=None, min_fits_pts=10, weighted_fit=False,\n bootstrap=False, bootstrap_kwargs={},\n verbose=False):\n '''\n Fit the 1D Power spectrum using a segmented linear model. Note that\n the current implementation allows for only 1 break point in the\n model. If the break point is estimated via a spline, the breaks are\n tested, starting from the largest, until the model finds a good fit.\n\n Parameters\n ----------\n fit_unbinned : bool, optional\n Fits the unbinned 2D power-spectrum to the linear model. Default is False.\n Use False to fit the binned 1D power-spectrum instead and replicate fitting\n in earlier TurbuStat versions.\n brk : float or None, optional\n Guesses for the break points. If given as a list, the length of\n the list sets the number of break points to be fit. If a choice is\n outside of the allowed range from the data, Lm_Seg will raise an\n error. If None, a spline is used to estimate the breaks.\n log_break : bool, optional\n Sets whether the provided break estimates are log-ed (base 10)\n values. This is disabled by default. When enabled, the brk must\n be a unitless `~astropy.units.Quantity`\n (`u.dimensionless_unscaled`).\n low_cut : `~astropy.units.Quantity`, optional\n Lowest frequency to consider in the fit.\n high_cut : `~astropy.units.Quantity`, optional\n Highest frequency to consider in the fit.\n min_fits_pts : int, optional\n Sets the minimum number of points needed to fit. If not met, the\n break found is rejected.\n weighted_fit : bool, optional\n Fit using weighted least-squares. 
The weights are\n the inverse-squared standard deviations in each radial bin.\n bootstrap : bool, optional\n Bootstrap using the model residuals to estimate the parameter\n standard errors. This tends to give more realistic intervals than\n the covariance matrix.\n bootstrap_kwargs : dict, optional\n Pass keyword arguments to `~turbustat.statistics.fitting_utils.residual_bootstrap`.\n verbose : bool, optional\n Enables verbose mode in Lm_Seg.\n '''\n\n self._bootstrap_flag = bootstrap\n\n if fit_unbinned:\n yy_freq, xx_freq = make_radial_freq_arrays(self.ps2D.shape)\n\n freqs_2d = np.sqrt(yy_freq**2 + xx_freq**2) / u.pix\n\n # Make the data to fit to\n if low_cut is None:\n # Default to the largest frequency, since this is just 1 pixel\n # in the 2D PSpec.\n self.low_cut = 1. / (0.5 * float(max(self.ps2D.shape)) * u.pix)\n else:\n self.low_cut = self._to_pixel_freq(low_cut)\n\n if high_cut is None:\n # self.high_cut = self.freqs.max().value / u.pix\n self.high_cut = freqs_2d.value.max() / u.pix\n else:\n self.high_cut = self._to_pixel_freq(high_cut)\n\n\n clip_mask = clip_func(freqs_2d.value,\n self.low_cut.value,\n self.high_cut.value)\n\n\n x = np.log10(freqs_2d.value[clip_mask])\n y = np.log10(self.ps2D[clip_mask])\n\n else:\n # Make the data to fit to\n if low_cut is None:\n # Default to the largest frequency, since this is just 1 pixel\n # in the 2D PSpec.\n self.low_cut = 1. / (0.5 * float(max(self.ps2D.shape)) * u.pix)\n else:\n self.low_cut = self._to_pixel_freq(low_cut)\n\n if high_cut is None:\n self.high_cut = self.freqs.max().value / u.pix\n else:\n self.high_cut = self._to_pixel_freq(high_cut)\n\n x = np.log10(self.freqs[clip_func(self.freqs.value, self.low_cut.value,\n self.high_cut.value)].value)\n\n clipped_ps1D = self.ps1D[clip_func(self.freqs.value,\n self.low_cut.value,\n self.high_cut.value)]\n y = np.log10(clipped_ps1D)\n\n if weighted_fit:\n if fit_unbinned:\n raise NotImplementedError(\"Error propagation for the unbinned modeling is not \"\n \"implemented yet.\")\n\n # Currently this will run only for the binned fitting.\n clipped_stddev = self.ps1D_stddev[clip_func(self.freqs.value,\n self.low_cut.value,\n self.high_cut.value)]\n\n clipped_stddev[clipped_stddev == 0.] 
= np.NaN\n\n y_err = 0.434 * clipped_stddev / clipped_ps1D\n\n weights = 1 / y_err**2\n else:\n weights = None\n\n if brk is not None:\n # Try the fit with a break in it.\n if not log_break:\n brk = self._to_pixel_freq(brk).value\n brk = np.log10(brk)\n else:\n # A value given in log shouldn't have dimensions\n if hasattr(brk, \"unit\"):\n assert brk.unit == u.dimensionless_unscaled\n brk = brk.value\n\n brk_fit = Lm_Seg(x, y, brk, weights=weights)\n brk_fit.fit_model(verbose=verbose, cov_type='HC3')\n\n if brk_fit.params.size == 5:\n\n # Check to make sure this leaves enough to fit to.\n if sum(x < brk_fit.brk) < min_fits_pts:\n warnings.warn(\"Not enough points to fit to.\" +\n \" Ignoring break.\")\n\n self._brk = None\n else:\n good_pts = x.copy() < brk_fit.brk\n x = x[good_pts]\n y = y[good_pts]\n\n self._brk = 10**brk_fit.brk / u.pix\n\n self._slope = brk_fit.slopes\n\n if bootstrap:\n stderrs = residual_bootstrap(brk_fit.fit,\n **bootstrap_kwargs)\n\n self._slope_err = stderrs[1:-1]\n self._brk_err = np.log(10) * self.brk.value * \\\n stderrs[-1] / u.pix\n\n else:\n self._slope_err = brk_fit.slope_errs\n self._brk_err = np.log(10) * self.brk.value * \\\n brk_fit.brk_err / u.pix\n\n self.fit = brk_fit.fit\n self._model = brk_fit\n\n else:\n self._brk = None\n # Break fit failed, revert to normal model\n warnings.warn(\"Model with break failed, reverting to model\\\n without break.\")\n else:\n self._brk = None\n self._brk_err = None\n\n if self.brk is None:\n x = sm.add_constant(x)\n\n if weighted_fit:\n model = sm.WLS(y, x, missing='drop', weights=weights)\n else:\n model = sm.OLS(y, x, missing='drop')\n\n self.fit = model.fit(cov_type='HC3')\n\n self._slope = self.fit.params[1]\n\n if bootstrap:\n stderrs = residual_bootstrap(self.fit,\n **bootstrap_kwargs)\n self._slope_err = stderrs[1]\n\n else:\n self._slope_err = self.fit.bse[1]\n\n @property\n def slope(self):\n '''\n Power spectrum slope(s).\n '''\n return self._slope\n\n @property\n def slope_err(self):\n '''\n 1-sigma error on the power spectrum slope(s).\n '''\n return self._slope_err\n\n @property\n def brk(self):\n '''\n Fitted break point.\n '''\n return self._brk\n\n @property\n def brk_err(self):\n '''\n 1-sigma on the break point.\n '''\n return self._brk_err\n\n def apodizing_kernel(self, kernel_type=\"tukey\", alpha=0.1, beta=0.0):\n '''\n Return an apodizing kernel to be applied to the image before taking\n Fourier transform\n\n Returns\n -------\n window : `~numpy.ndarray`\n Apodizing kernel\n '''\n\n if self.data is not None:\n shape = self.data.shape\n else:\n # MVC doesn't have a data attribute set\n shape = self.centroid.shape\n\n # Assume first axis is velocity if >2 dimensions\n if len(shape) > 2:\n shape = shape[1:]\n\n avail_types = ['splitcosinebell', 'hanning', 'tukey',\n 'cosinebell']\n\n if kernel_type == \"splitcosinebell\":\n return SplitCosineBellWindow(alpha, beta)(shape)\n elif kernel_type == \"hanning\":\n return HanningWindow()(shape)\n elif kernel_type == \"tukey\":\n return TukeyWindow(alpha)(shape)\n elif kernel_type == 'cosinebell':\n return CosineBellWindow(alpha)(shape)\n else:\n raise ValueError(\"kernel_type {0} is not one of the available \"\n \"types: {1}\".format(kernel_type, avail_types))\n\n
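 # A minimal usage sketch (illustrative only; ``stat`` stands for any subclass that sets ``data``):\n #     window = stat.apodizing_kernel(kernel_type=\"tukey\", alpha=0.3)\n #     tapered = stat.data * window\n # Tapering the image before its FFT suppresses ringing from the map edges.\n\n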
 def fit_2Dpspec(self, fit_method='LevMarq', p0=(), low_cut=None,\n high_cut=None, bootstrap=True, niters=100,\n use_azimmask=False, radial_weighting=False,\n fix_ellip_params=False):\n '''\n Model the 2D power-spectrum surface with an elliptical power-law model.\n\n Parameters\n ----------\n fit_method : str, optional\n The fitting algorithm to use. Only 'LevMarq' is currently\n available.\n p0 : tuple, optional\n Initial parameters for fitting. If no values are given, the initial\n parameters start from the 1D fit parameters.\n low_cut : `~astropy.units.Quantity`, optional\n Lowest frequency to consider in the fit.\n high_cut : `~astropy.units.Quantity`, optional\n Highest frequency to consider in the fit.\n bootstrap : bool, optional\n Bootstrap using the model residuals to estimate the parameter\n standard errors. This tends to give more realistic intervals than\n the covariance matrix.\n niters : int, optional\n Number of bootstrap iterations.\n use_azimmask : bool, optional\n Use the azimuthal mask defined for the 1D spectrum, when azimuthal\n limits have been given.\n radial_weighting : bool, optional\n To account for the increasing number of samples at greater radii,\n the fit can be weighted by :math:`1/\\\\mathrm{radius}` to emphasize the\n points at small radii. DO NOT enable weighting when the field is\n elliptical! This will bias the fit parameters! Default is False.\n fix_ellip_params : bool, optional\n If the field is expected to be isotropic, the ellipticity and theta\n parameters can be fixed in the fit. This will help the fit since\n the isotropic case sits at the edge of the ellipticity parameter\n space and can be difficult to correctly converge to.\n '''\n\n # Make the data to fit to\n if low_cut is None:\n # Default to the largest frequency, since this is just 1 pixel\n # in the 2D PSpec.\n self.low_cut = 1. / (0.5 * float(max(self.ps2D.shape)) * u.pix)\n else:\n self.low_cut = self._to_pixel_freq(low_cut)\n\n if high_cut is None:\n self.high_cut = self.freqs.max().value / u.pix\n else:\n self.high_cut = self._to_pixel_freq(high_cut)\n\n yy_freq, xx_freq = make_radial_freq_arrays(self.ps2D.shape)\n\n freqs_dist = np.sqrt(yy_freq**2 + xx_freq**2)\n\n mask = clip_func(freqs_dist, self.low_cut.value, self.high_cut.value)\n\n if hasattr(self, \"_azim_mask\") and use_azimmask:\n mask = np.logical_and(mask, self._azim_mask)\n\n if not mask.any():\n raise ValueError(\"Limits have removed all points to fit. 
\"\n \"Make low_cut and high_cut less restrictive.\")\n\n if len(p0) == 0:\n if hasattr(self, 'slope'):\n if isinstance(self.slope, np.ndarray):\n slope_guess = self.slope[0]\n else:\n slope_guess = self.slope\n amp_guess = self.fit.params[0]\n else:\n # Let's guess it's going to be ~ -2\n slope_guess = -2.\n amp_guess = np.log10(np.nanmax(self.ps2D))\n\n # Use an initial guess pi / 2 for theta\n theta = np.pi / 2.\n # For ellip = 0.5\n ellip_conv = 0\n p0 = (amp_guess, ellip_conv, theta, slope_guess)\n\n fit_values = np.log10(self.ps2D[mask])\n if isinstance(fit_values, u.Quantity):\n fit_values = fit_values.value\n\n params, stderrs, fit_2Dmodel, fitter = \\\n fit_elliptical_powerlaw(fit_values,\n xx_freq[mask],\n yy_freq[mask], p0,\n fit_method=fit_method,\n bootstrap=bootstrap,\n niters=niters,\n radial_weighting=radial_weighting,\n fix_ellip_params=fix_ellip_params)\n\n self.fit2D = fit_2Dmodel\n self._fitter = fitter\n\n self._slope2D = params[3]\n self._slope2D_err = stderrs[3]\n\n self._theta2D = params[2] % np.pi\n self._theta2D_err = stderrs[2]\n\n # Apply transforms to convert back to the [0, 1) ellipticity range\n self._ellip2D = inverse_interval_transform(params[1], 0, 1)\n self._ellip2D_err = \\\n inverse_interval_transform_stderr(stderrs[1], params[1], 0, 1)\n\n # Add a warning that if ellip is close to 1 it may be worth fixing that\n # parameter.\n if self.ellip2D > 0.97 and not fix_ellip_params:\n warnings.warn(\"The elliptical parameter is close to 1. The field \"\n \"may be isotropic and the fit is not converging to \"\n \"1. Consider fitting with `fix_ellip_params=True`,\"\n \" which forces the ellipticity to 1.\")\n\n @property\n def slope2D(self):\n '''\n Fitted slope of the 2D power spectrum.\n '''\n return self._slope2D\n\n @property\n def slope2D_err(self):\n '''\n Slope standard error of the 2D power spectrum.\n '''\n return self._slope2D_err\n\n @property\n def theta2D(self):\n '''\n Fitted position angle of the 2D power spectrum.\n '''\n return self._theta2D\n\n @property\n def theta2D_err(self):\n '''\n Position angle standard error of the 2D power spectrum.\n '''\n return self._theta2D_err\n\n @property\n def ellip2D(self):\n '''\n Fitted ellipticity of the 2D power spectrum.\n '''\n return self._ellip2D\n\n @property\n def ellip2D_err(self):\n '''\n Ellipticity standard error of the 2D power spectrum.\n '''\n return self._ellip2D_err\n\n def plot_fit(self, show_2D=False, show_residual=True,\n color='r', fit_color='k', label=None,\n fillin_errs=True, symbol=\"o\", xunit=u.pix**-1, save_name=None,\n use_wavenumber=False):\n '''\n Plot the fitted model.\n\n Parameters\n ----------\n show_2D : bool, optional\n Plot the 2D power spectrum with contours for the masked regions\n and 2D fit contours (if the 2D power spectrum was fit).\n show_residual : bool, optional\n Plot the residuals for the 1D power-spectrum fit.\n color : str, optional\n Color to use in the plotted points.\n fit_color : str, optional\n Color to show the fitted relation in. Defaults to `color` when\n no color is given.\n label : str, optional\n Apply a label to the 1D plot. Useful for overplotting multiple\n power-spectra.\n fillin_errs : bool, optional\n Show the range of the standard deviation with as a transparent\n filled in region. When disabled, the standard deviations are shown\n as error bars.\n symbol : str, optional\n Plot symbols for the 1D power spectrum.\n xunit : `astropy.units.Unit`, optional\n Units for the x-axis. 
If a header is given, `xunit` can be given\n in inverse angular units. And if a distance is given, an inverse\n physical unit can also be passed.\n save_name : str, optional\n File name for the plot to be saved. Enables saving when a string\n is given.\n use_wavenumber : bool, optional\n Convert spatial frequencies to a wavenumber.\n '''\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n\n if use_wavenumber:\n xlab = r\"k / (\" + xunit.to_string() + \")\"\n else:\n xlab = r\"Spatial Frequency (\" + xunit.to_string() + \")\"\n\n if fit_color is None:\n fit_color = color\n\n fig = plt.gcf()\n axes = plt.gcf().get_axes()\n\n # Setup axes\n if len(axes) == 3:\n # Setup for all 3 axes\n ax = axes[0]\n ax_1D = axes[1]\n ax_1D_res = axes[2]\n elif len(axes) == 2:\n if show_2D:\n ax = axes[0]\n ax_1D = axes[1]\n elif show_residual:\n ax_1D = axes[0]\n ax_1D_res = axes[1]\n elif len(axes) == 1:\n ax_1D = axes[0]\n else:\n # If there are none, setup the initial axes\n if show_2D:\n ax = plt.subplot2grid((4, 2), (0, 1), colspan=1, rowspan=4)\n\n if show_residual:\n ax_1D = plt.subplot2grid((4, 2), (0, 0), colspan=1,\n rowspan=3)\n ax_1D_res = plt.subplot2grid((4, 2), (3, 0), colspan=1,\n rowspan=1, sharex=ax_1D)\n else:\n ax_1D = plt.subplot2grid((4, 2), (0, 0), colspan=1,\n rowspan=4)\n else:\n if show_residual:\n ax_1D = plt.subplot2grid((4, 1), (0, 0), colspan=1,\n rowspan=3)\n ax_1D_res = plt.subplot2grid((4, 1), (3, 0), colspan=1,\n rowspan=1, sharex=ax_1D)\n else:\n ax_1D = plt.subplot2grid((4, 1), (0, 0), colspan=1,\n rowspan=4)\n\n # 2D Spectrum is shown alongside 1D. Otherwise only 1D is returned.\n if show_2D:\n yy_freq, xx_freq = make_radial_freq_arrays(self.ps2D.shape)\n\n freqs_dist = np.sqrt(yy_freq**2 + xx_freq**2)\n\n mask = np.logical_and(freqs_dist >= self.low_cut.value,\n freqs_dist <= self.high_cut.value)\n\n # Scale the colour map to be values within the mask\n vmax = np.log10(self.ps2D[mask]).max()\n vmin = np.log10(self.ps2D[mask]).min()\n\n im1 = ax.imshow(np.log10(self.ps2D), interpolation=\"nearest\",\n origin=\"lower\", vmax=vmax, vmin=vmin)\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", \"5%\", pad=\"3%\")\n cb = plt.colorbar(im1, cax=cax)\n cb.set_label(r\"log $P_2 \\ (K_x,\\ K_y)$\")\n\n ax.contour(mask, colors=[color], linestyles='--')\n\n # Plot fit contours\n if hasattr(self, 'fit2D'):\n ax.contour(self.fit2D(xx_freq, yy_freq), cmap='viridis')\n\n if hasattr(self, \"_azim_mask\"):\n ax.contour(self._azim_mask, colors=[color], linestyles='--')\n\n good_interval = clip_func(self.freqs.value, self.low_cut.value,\n self.high_cut.value)\n\n if show_residual:\n if isinstance(self.slope, np.ndarray):\n # Broken linear model\n y_res = np.log10(self.ps1D) - \\\n self._model.model(np.log10(self.freqs.value))\n else:\n y_res = np.log10(self.ps1D) - \\\n self.fit.predict(sm.add_constant(np.log10(self.freqs.value)))\n\n fit_index = np.logical_and(np.isfinite(self.ps1D), good_interval)\n\n # Set the x-values to use (freqs or k)\n if use_wavenumber:\n xvals = self.wavenumbers\n else:\n xvals = self.freqs\n\n xvals = self._spatial_freq_unit_conversion(xvals, xunit).value\n\n # Axis limits to highlight the fitted region\n vmax = 1.1 * \\\n np.nanmax((self.ps1D + self.ps1D_stddev)\n [self.freqs <= self.high_cut])\n\n logyerrs = 0.434 * (self.ps1D_stddev / self.ps1D)\n\n if fillin_errs:\n # Implementation by R. 
Boyden\n ax_1D.fill_between(np.log10(xvals),\n np.log10(self.ps1D) - logyerrs,\n np.log10(self.ps1D) + logyerrs,\n color=color,\n alpha=0.5)\n\n ax_1D.plot(np.log10(xvals), np.log10(self.ps1D), symbol,\n color=color, markersize=5, alpha=0.8)\n\n if show_residual:\n ax_1D_res.fill_between(np.log10(xvals),\n y_res - logyerrs,\n y_res + logyerrs,\n color=color,\n alpha=0.5)\n\n ax_1D_res.plot(np.log10(xvals), y_res,\n symbol, color=color, markersize=5, alpha=0.8)\n\n else:\n ax_1D.errorbar(np.log10(xvals),\n np.log10(self.ps1D),\n yerr=logyerrs,\n color=color,\n fmt=symbol, markersize=5, alpha=0.5, capsize=10,\n elinewidth=3)\n\n if show_residual:\n ax_1D_res.errorbar(np.log10(xvals),\n y_res,\n yerr=logyerrs,\n color=color,\n fmt=symbol, markersize=5, alpha=0.5,\n capsize=10, elinewidth=3)\n\n xvals_plot = np.log10(xvals[fit_index]).ravel()\n # y_fit = self.fit.fittedvalues\n y_fit = self.fit.predict(sm.add_constant(xvals_plot))\n\n ax_1D.plot(xvals_plot, y_fit, linestyle='-',\n label=label, linewidth=3, color=fit_color)\n\n if show_residual:\n ax_1D_res.set_xlabel(\"log \" + xlab)\n ax_1D_res.set_ylabel(r\"Residuals\")\n plt.setp(ax_1D.get_xticklabels(), visible=False)\n else:\n ax_1D.set_xlabel(\"log \" + xlab)\n\n ax_1D.set_ylabel(r\"log P$_2(K)$\")\n\n ax_1D.set_ylim(top=np.log10(vmax))\n\n # Show the fitting extents\n low_cut = self._spatial_freq_unit_conversion(self.low_cut, xunit).value\n high_cut = \\\n self._spatial_freq_unit_conversion(self.high_cut, xunit).value\n low_cut = low_cut if not use_wavenumber else \\\n low_cut * min(self._ps2D.shape)\n high_cut = high_cut if not use_wavenumber else \\\n high_cut * min(self._ps2D.shape)\n ax_1D.axvline(np.log10(low_cut), color=color, alpha=0.5,\n linestyle='--')\n ax_1D.axvline(np.log10(high_cut), color=color, alpha=0.5,\n linestyle='--')\n ax_1D.grid(True)\n\n if show_residual:\n ax_1D_res.axvline(np.log10(low_cut), color=color, alpha=0.5,\n linestyle='--')\n ax_1D_res.axvline(np.log10(high_cut), color=color, alpha=0.5,\n linestyle='--')\n ax_1D_res.grid(True)\n\n ax_1D_res.set_xlim(ax_1D.get_xlim())\n\n plt.tight_layout()\n\n fig.subplots_adjust(hspace=0.1)\n\n if save_name is not None:\n plt.savefig(save_name)\n plt.close()\n else:\n plt.show()\n","repo_name":"Astroua/TurbuStat","sub_path":"turbustat/statistics/base_pspec2.py","file_name":"base_pspec2.py","file_ext":"py","file_size_in_byte":28211,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"81"} +{"seq_id":"26577885439","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\"\"\"\nYokadi daemon. 
Used to monitor due tasks and warn user.\n\n@author: Sébastien Renard \n@license: GPL v3 or later\n\"\"\"\n\nimport sys\nimport os\nimport time\nfrom datetime import datetime, timedelta\nfrom signal import SIGTERM, SIGHUP, signal\nfrom subprocess import Popen\nfrom argparse import ArgumentParser\n\nfrom yokadi.core import fileutils\n\ntry:\n import setproctitle\nexcept ImportError:\n print(\"You don't have the setproctitle package.\")\n print(\"Get it on http://pypi.python.org/pypi/setproctitle/\")\n print(\"Or use 'easy_install setproctitle'\")\n sys.exit(1)\n\nfrom yokadi.core.daemon import Daemon\nfrom yokadi.core import basepaths\nfrom yokadi.yical.yical import YokadiIcalServer\n\nfrom yokadi.core import db\nfrom yokadi.core.db import Project, Task, getConfigKey\nfrom yokadi.ycli import commonargs\n\n\n# Daemon polling delay (in seconds)\nPROCESS_INTERVAL = 30\nEVENTLOOP_INTERVAL = 1\n\n# Ical daemon default port\nDEFAULT_TCP_ICAL_PORT = 8000\n\n# Event sender to main loop\nevent = [True, \"\"]\n\n\ndef sigTermHandler(signal, stack):\n \"\"\"Handler when yokadid receives SIGTERM\"\"\"\n print(\"Receive SIGTERM. Exiting\")\n print(\"End of yokadi Daemon\")\n event[0] = False\n event[1] = \"SIGTERM\"\n\n\ndef sigHupHandler(signal, stack):\n \"\"\"Handler when yokadid receives SIGHUP\"\"\"\n print(\"Receive SIGHUP. Reloading configuration\")\n event[0] = False\n event[1] = \"SIGHUP\"\n\n\ndef eventLoop():\n \"\"\"Main event loop\"\"\"\n delta = timedelta(hours=float(getConfigKey(\"ALARM_DELAY\")))\n suspend = timedelta(hours=float(getConfigKey(\"ALARM_SUSPEND\")))\n cmdDelayTemplate = getConfigKey(\"ALARM_DELAY_CMD\")\n cmdDueTemplate = getConfigKey(\"ALARM_DUE_CMD\")\n session = db.getSession()\n # For the two following dicts, task id is key, and value is (duedate, triggerdate)\n triggeredDelayTasks = {}\n triggeredDueTasks = {}\n activeTaskFilter = [Task.status != \"done\",\n Task.projectId == Project.id,\n Project.active == True] # noqa\n\n def process(now):\n delayTasks = session.query(Task).filter(Task.dueDate < now + delta,\n Task.dueDate > now,\n *activeTaskFilter)\n dueTasks = session.query(Task).filter(Task.dueDate < now,\n *activeTaskFilter)\n processTasks(delayTasks, triggeredDelayTasks, cmdDelayTemplate, suspend)\n processTasks(dueTasks, triggeredDueTasks, cmdDueTemplate, suspend)\n\n nextProcessTime = datetime.today().replace(microsecond=0)\n while event[0]:\n now = datetime.today().replace(microsecond=0)\n if now > nextProcessTime:\n process(now)\n nextProcessTime = now + timedelta(seconds=PROCESS_INTERVAL)\n time.sleep(EVENTLOOP_INTERVAL)\n\n\n
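# Note: eventLoop above wakes every EVENTLOOP_INTERVAL second so SIGTERM/SIGHUP are noticed promptly, while the heavier task queries in process() run only once per PROCESS_INTERVAL.\n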
def processTasks(tasks, triggeredTasks, cmdTemplate, suspend):\n \"\"\"Process a list of tasks and trigger action if needed\n @param tasks: list of tasks\n @param triggeredTasks: dict of tasks that have been triggered. Dict can be updated\n @param cmdTemplate: command line template to execute when a task triggers\n @param suspend: timedelta between two triggers of the same task\"\"\"\n now = datetime.now()\n for task in tasks:\n if task.id in triggeredTasks and triggeredTasks[task.id][0] == task.dueDate:\n # This task with the same dueDate has already been triggered\n if now - triggeredTasks[task.id][1] < suspend:\n # Task has been triggered recently, skip to next\n continue\n print(\"Task %s is due soon\" % task.title)\n cmd = cmdTemplate.replace(\"{ID}\", str(task.id))\n cmd = cmd.replace(\"{TITLE}\", task.title.replace('\"', '\\\"'))\n cmd = cmd.replace(\"{PROJECT}\", task.project.name.replace('\"', '\\\"'))\n cmd = cmd.replace(\"{DATE}\", str(task.dueDate))\n process = Popen(cmd, shell=True)\n process.wait()\n # TODO: redirect stdout/stderr properly to Log (not so easy...)\n triggeredTasks[task.id] = (task.dueDate, datetime.now())\n\n\ndef killYokadid(pidFile):\n \"\"\"Kill Yokadi daemon\n @param pidFile: file where the pid of the daemon is stored\n \"\"\"\n # reuse Daemon.stop() code\n daemon = Daemon(pidFile)\n daemon.stop()\n\n\ndef parseOptions(defaultPidFile, defaultLogFile):\n parser = ArgumentParser()\n\n commonargs.addArgs(parser)\n\n parser.add_argument(\"-i\", \"--icalserver\",\n dest=\"icalserver\", default=False, action=\"store_true\",\n help=\"Start the optional HTTP Ical Server\")\n\n parser.add_argument(\"-p\", \"--port\",\n dest=\"tcpPort\", default=DEFAULT_TCP_ICAL_PORT,\n help=\"TCP port of ical server (default: %s)\" % DEFAULT_TCP_ICAL_PORT,\n metavar=\"PORT\")\n\n parser.add_argument(\"-l\", \"--listen\",\n dest=\"tcpListen\", default=False, action=\"store_true\",\n help=\"Listen on all interfaces (not only localhost) for ical server\")\n\n parser.add_argument(\"-k\", \"--kill\",\n dest=\"kill\", default=False, action=\"store_true\",\n help=\"Kill the Yokadi daemon. The daemon is found from the process ID stored in the file\"\n \" specified with --pid\")\n\n parser.add_argument(\"--restart\",\n dest=\"restart\", default=False, action=\"store_true\",\n help=\"Restart the Yokadi daemon. The daemon is found from the process ID stored in the file\"\n \" specified with --pid\")\n\n parser.add_argument(\"-f\", \"--foreground\",\n dest=\"foreground\", default=False, action=\"store_true\",\n help=\"Don't fork to background. Useful for debug\")\n\n parser.add_argument(\"--pid\",\n dest=\"pidFile\", default=defaultPidFile,\n help=\"File in which Yokadi daemon stores its process ID (default: %s)\" % defaultPidFile)\n\n parser.add_argument(\"--log\",\n dest=\"logFile\", default=defaultLogFile,\n help=\"File in which Yokadi daemon stores its log output (default: %s)\" % defaultLogFile)\n\n return parser.parse_args()\n\n\nclass YokadiDaemon(Daemon):\n def __init__(self, dbPath, options):\n Daemon.__init__(self, options.pidFile, stdout=options.logFile, stderr=options.logFile)\n self.dbPath = dbPath\n self.options = options\n\n def run(self):\n db.connectDatabase(self.dbPath, createIfNeeded=False)\n print(\"Using %s\" % self.dbPath)\n session = db.getSession()\n\n # Basic tests :\n if not len(session.query(db.Config).all()) >= 1:\n print(\"Your database seems broken or not initialised properly. 
Start yokadi command line tool to do it\")\n sys.exit(1)\n\n # Start ical http handler\n if self.options.icalserver:\n yokadiIcalServer = YokadiIcalServer(self.options.tcpPort, self.options.tcpListen)\n yokadiIcalServer.start()\n\n # Start the main event Loop\n try:\n while event[1] != \"SIGTERM\":\n eventLoop()\n event[0] = True\n except KeyboardInterrupt:\n print(\"\\nExiting...\")\n\n\ndef main():\n # TODO: check that yokadid is not already running for this database ? Not very harmful...\n # Set process name to \"yokadid\"\n setproctitle.setproctitle(\"yokadid\")\n\n # Make the event list global to allow communication with main event loop\n global event\n\n defaultPidFile = os.path.join(basepaths.getRuntimeDir(), \"yokadid.pid\")\n defaultLogFile = os.path.join(basepaths.getLogDir(), \"yokadid.log\")\n args = parseOptions(defaultPidFile, defaultLogFile)\n _, dbPath = commonargs.processArgs(args)\n\n if args.kill:\n killYokadid(args.pidFile)\n sys.exit(0)\n\n if args.pidFile == defaultPidFile:\n fileutils.createParentDirs(args.pidFile, mode=0o700)\n\n if args.logFile == defaultLogFile:\n fileutils.createParentDirs(args.logFile, mode=0o700)\n\n signal(SIGTERM, sigTermHandler)\n signal(SIGHUP, sigHupHandler)\n\n if args.restart:\n daemon = YokadiDaemon(dbPath, args)\n daemon.restart()\n\n daemon = YokadiDaemon(dbPath, args)\n if args.foreground:\n daemon.run()\n else:\n daemon.start()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"agateau/yokadi","sub_path":"yokadi/yokadid.py","file_name":"yokadid.py","file_ext":"py","file_size_in_byte":8475,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"81"} +{"seq_id":"36473324730","text":"import copy\n\n\nclass Piece:\n\n names = [\"X\", \"x\"]\n\n def __init__(self, color):\n self.color = color\n self.name = self.names[color]\n self.times_moved = 0\n\n def moved(self):\n self.times_moved += 1\n\n # true if target space is empty or your opponents piece\n def _valid_target(self, board, position):\n x = position[0]\n y = position[1]\n if range(0, 8).__contains__(x) and range(0, 8).__contains__(y):\n target = board[x][y]\n if target == []:\n return True\n elif target.color != self.color:\n return True\n\n def _vertical_moves_from_position(self, board, position):\n moves = []\n for i in [-1, 1]:\n y_branch = copy.copy(position)\n searching = True\n while searching:\n y_branch[1] += i\n if y_branch[1] > 7 or y_branch[1] < 0:\n searching = False\n else:\n current_square = board[y_branch[0]][y_branch[1]]\n if current_square == []:\n moves.append(copy.copy(y_branch))\n elif current_square.color == self.color:\n searching = False\n else:\n moves.append(y_branch)\n searching = False\n return moves\n\n def _horizontal_moves_from_position(self, board, position):\n moves = []\n for i in [-1, 1]:\n x_branch = copy.copy(position)\n searching = True\n while searching:\n x_branch[0] += i\n if x_branch[0] > 7 or x_branch[0] < 0:\n searching = False\n else:\n current_square = board[x_branch[0]][x_branch[1]]\n if current_square == []:\n moves.append(copy.copy(x_branch))\n elif current_square.color == self.color:\n searching = False\n else:\n moves.append(x_branch)\n searching = False\n return moves\n\n #return all straight moves piece can make: used for queen and rook\n def _straight_moves_from_position(self, board, position):\n return self._vertical_moves_from_position(\n board, position) + self._horizontal_moves_from_position(\n board, position)\n\n #return all diagonal moves piece can make: used for queen and 
bishop\n def _diagonal_moves_from_position(self, board, position):\n moves = []\n for x in [-1, 1]:\n for y in [-1, 1]:\n searching_branch = True\n new_x_pos = position[0] + x\n new_y_pos = position[1] + y\n while searching_branch:\n if self._valid_target(board, [new_x_pos, new_y_pos]):\n moves.append([new_x_pos, new_y_pos])\n if board[new_x_pos][new_y_pos] != []:\n searching_branch = False\n new_x_pos += x\n new_y_pos += y\n\n else:\n searching_branch = False\n\n return moves\n\n #return a list of all positions this piece could move to from the given position\n def moves_from_position(self, board, position):\n return []","repo_name":"mgtsn/chess-py","sub_path":"pieces/piece.py","file_name":"piece.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29660873007","text":"import pytest\nimport numpy as np\n\nfrom spikeinterface.extractors import toy_example\n\n\ndef test_toy_example():\n rec, sorting = toy_example(num_segments=2, num_units=10)\n assert rec.get_num_segments() == 2\n assert sorting.get_num_segments() == 2\n assert sorting.get_num_units() == 10\n # print(rec)\n # print(sorting)\n\n rec, sorting = toy_example(num_segments=1)\n assert rec.get_num_segments() == 1\n assert sorting.get_num_segments() == 1\n print(rec)\n print(sorting)\n\n # print(rec.get_channel_locations())\n\n probe = rec.get_probe()\n print(probe)\n\n # import matplotlib.pyplot as plt\n # fig, ax = plt.subplots()\n # ax.plot(rec.get_traces())\n # plt.show()\n\n\nif __name__ == '__main__':\n test_toy_example()\n","repo_name":"caniko/spikeinterface","sub_path":"spikeinterface/extractors/tests/test_toy_example.py","file_name":"test_toy_example.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"74921852104","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[35]:\n\n\ndata = pd.read_csv('D:\\\\nidishdoc\\\\titanic\\\\titanic.csv')\ndata = data.drop('PassengerId', axis = 1)\ndata\n\n\n# In[4]:\n\n\nmissing=data.isnull().sum()\nmissing\n\n\n# In[36]:\n\n\ndata_1=data.copy()\ndata_1['Age']=data_1['Age'].fillna(data_1['Age'].mean())\nunique=data_1['Cabin'].unique()\nunique\nunique_1=data_1['Cabin'].value_counts()\nunique_1\n\n\n# In[37]:\n\n\ndata_1['Cabin']=data_1['Cabin'].fillna(data_1['Cabin'].mode()[0])\ndata_1['Cabin']\ndata_1.info()\n\n\n# In[38]:\n\n\nfrom sklearn import preprocessing\nlabel_encoder = preprocessing.LabelEncoder()\ndata_1['Cabin']= label_encoder.fit_transform(data_1['Cabin'])\ndata_1.head()\n\n\n# In[39]:\n\n\ncorrelation=data_1.corr()\nsns.heatmap(correlation,annot=True)\nplt.show()\n\n\n# In[40]:\n\n\nsns.pairplot(data_1)\nplt.show()\n\n\n# In[71]:\n\n\ncolumns=['Embarked','Sex']\ndef categories(multi_columns):\n final=data_1\n i=0\n for field in multi_columns:\n \n print(field)\n data_2=pd.get_dummies(data_1[field],drop_first=True)\n data_1.drop([field],axis=1,inplace=True)\n if i == 0:\n final=data_2.copy()\n else:\n final=pd.concat([final,data_2],axis=1)\n i=i+1\n final=pd.concat([data_1,final],axis=1)\n \n return final\n\n\n# In[72]:\n\n\nfinal_data_1=categories(columns)\n\n\n# In[73]:\n\n\nfinal_data_1\n\n\n# In[77]:\n\n\nfinal_data=final_data_1.drop(columns=[\"Name\",'Ticket'],axis=1)\nfinal_data['Age']=final_data.Age.astype(int)\nfinal_data.info()\n\n\n# 
In[86]:\n\n\nX=final_data.drop(['Survived'],axis=1)\nY=final_data['Survived']\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import accuracy_score\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 1)\nX_train\n\n\n# In[87]:\n\n\nlogistic = LogisticRegression()\nlogistic.fit(X_train, y_train)\ny_pred = logistic.predict(X_test)\nscore_1 = accuracy_score(y_test,y_pred)\nprint(\"Accuracy on Training set: \",logistic.score(X_train,y_train))\nprint(\"Accuracy on Testing set: \",logistic.score(X_test,y_test))\nprint(\"accuracy_score\", score_1)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"nidish03/titanic","sub_path":"titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"13743801374","text":"\"\"\"\n Base, Default, and generic Debug error handlers for txweb.\n\n\n\"\"\"\nfrom __future__ import annotations\nimport typing as T\n\nfrom twisted.python.failure import Failure\nfrom twisted.python.compat import intToBytes\n\nfrom txweb.lib.str_request import StrRequest\nfrom txweb.log import getLogger\nfrom ... import http_codes\nfrom . import html\nfrom ..str_request import StrRequest\nfrom .base import BaseHandler\n\n\nlog = getLogger(__name__)\n\n\n\n\n\n# noinspection PyMissingConstructor\nclass DefaultHandler(BaseHandler):\n \"\"\"\n Primarily focused on handling 3xx HTTP exception/codes thrown by the application.\n\n \"\"\"\n\n def process(self, request: StrRequest, reason: Failure) -> T.Union[None, bool]:\n \"\"\"\n As mentioned in class docblock, primary focus is handling HTTPCode exceptions thrown by the application.\n\n If the request/response factory has already started writing to the client, this halts all error processing\n and throws the exception.\n\n else if it is an HTTPCode exception/error, it redirects for 3xx codes\n OR it writes the code and message to the client\n (Eg HTTPCode500 with Internal error would throw 500 \"Internal server error\")\n\n else it sends a 500 HTTP response and then raises the exception back into the user application.\n\n Parameters\n ----------\n request: StrRequest\n reason: Failure\n\n Returns\n -------\n False on failure to handle error\n \"\"\"\n\n if request.startedWriting not in [0, False]:\n # There is nothing we can do, the out going stream is already tainted\n # noinspection PyBroadException\n log.error(\"Failed writing error message to an active stream\")\n request.ensureFinished()\n reason.raiseException()\n\n elif isinstance(reason.value, http_codes.HTTPCode) or issubclass(reason.type, http_codes.HTTPCode):\n\n if issubclass(reason.type, http_codes.HTTP3xx):\n exc = reason.value\n request.redirect(exc.redirect, exc.code)\n response = html.REDIRECT_BODY.format(url=exc.redirect)\n request.writeTotal(response, code=exc.code, message=exc.message)\n else:\n exc = reason.value # type: HTTPCode\n request.setResponseCode(exc.code, exc.message)\n request.setHeader(\"Content-length\", intToBytes(len(exc.message)))\n request.write(exc.message)\n\n else:\n request.setResponseCode(500, b\"Internal server error\")\n log.debug(f\"Non-HTTPCode error was caught: {reason.type} - {reason.value}\")\n request.ensureFinished()\n reason.raiseException()\n\n request.ensureFinished()\n return True\n\n\n","repo_name":"devdave/txWeb","sub_path":"txweb/lib/errors/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}{"seq_id":"37181843836","text":"## Global imports\nimport time\nfrom datetime import datetime, timedelta, date\nimport math\n\n## Project files\nimport secretsfiles\nimport strava_calls\nimport calculations\n\n## Get Strava API token\nstrava_token = secretsfiles.get_strava_token()\n## Read Athleteinfo for FTP value\nAthleteInfo = strava_calls.get_athlete_info( strava_token )\n\n## To get past 70 (10 weeks) days of activities calculate 'after' date in epochtime. 1 day = 86400 sec\ndaysHistory = 70\nListOfActivities = strava_calls.get_athlete_activities(daysHistory = daysHistory, strava_token = strava_token)\n\n## Since we need to know for each day if it was a rest day, and therefore had no strava activity, we'll cycle through all days between epochHistory (start date) and today\n## Startdate is formatted from epoch to datetime.date\nstartDate = datetime.date( datetime.fromtimestamp( strava_calls.epochHistory(daysHistory) ))\nprint( \"Reading activities starting from \", startDate)\n\n## CTL – Chronic Training Load = Fitness\n## ATL – Acute Training Load = Fatigue\n## TSB – Training Stress Balance = Form\n## DailyProgress will be a list of the training per day. 
If there were two activities on the same day, they will be combined into one\n## DailyProgress = date, start_date_local, name, moving_time, weighted_average_watts, pss, intensityfactor, CTL, ATL, TSB \nDailyProgress = []\n## Checking for every date between startDate and today\nfitnessdate = startDate\nwhile fitnessdate <= date.today():\n ## Check if fitnessdate is found in ListOfActivities\n ## StravaDate = datetime.date( datetime.strptime(activity[\"start_date_local\"], \"%Y-%m-%dT%H:%M:%SZ\") )\n foundActivities = [x for x in ListOfActivities if datetime.date( datetime.strptime(x[\"start_date_local\"], \"%Y-%m-%dT%H:%M:%SZ\") ) == fitnessdate ]\n if len(foundActivities) == 0:\n ## Add empty day, calculate Fitness with 0 value\n ## DailyProgress = date, start_date_local, name, moving_time, weighted_average_watts, pss, intensityfactor, CTL, ATL, TSB\n DailyProgress.append( [fitnessdate, 0, \"\", 0, 0, 0, 0, 0, 0, 0 ] )\n else:\n TempActivity = []\n for foundActivity in foundActivities:\n ## Add the scores of all activities today to sum into one score (todo)\n if \"weighted_average_watts\" in foundActivity:\n ## DailyProgress = date, start_date_local, name, moving_time, weighted_average_watts, pss, intensityfactor, CTL, ATL, TSB\n calc = calculations.calc_trainingload( moving_time = foundActivity[\"moving_time\"], weighted_average_watts = foundActivity[\"weighted_average_watts\"], ftp = AthleteInfo[\"ftp\"])\n\n TempActivity = [\n fitnessdate,\n foundActivity[\"start_date_local\"],\n foundActivity[\"name\"],\n foundActivity[\"moving_time\"],\n foundActivity[\"weighted_average_watts\"],\n calc[0], ## PSS\n calc[1], ## Intensity Factor\n 0, ## CTL\n 0, ## ATL,\n 0 ## TSB\n ]\n elif \"average_heartrate\" in foundActivity:\n ## DailyProgress = date, start_date_local, name, moving_time, weighted_average_watts, pss, intensityfactor, CTL, ATL, TSB\n ## 'has_heartrate': True\n ## 'average_heartrate': 134.4\n TempActivity = [\n fitnessdate,\n foundActivity[\"start_date_local\"],\n foundActivity[\"name\"],\n foundActivity[\"moving_time\"],\n foundActivity[\"average_heartrate\"],\n 0, ## PSS\n 0, ## Intensity Factor\n 0, ## CTL\n 0, ## ATL,\n 0 ## TSB\n ]\n else:\n ## If there is no weighted_average_watts and no average_heartrate, we can't calculate fitness\n TempActivity = [\n fitnessdate,\n foundActivity[\"start_date_local\"],\n foundActivity[\"name\"],\n foundActivity[\"moving_time\"],\n 0,\n 0, ## PSS\n 0, ## Intensity Factor\n 0, ## CTL\n 0, ## ATL,\n 0 ## TSB\n ]\n \n ## DailyProgress = date, start_date_local, name, moving_time, weighted_average_watts, pss, intensityfactor, CTL, ATL, TSB\n DailyProgress.append( TempActivity )\n\n # End Loop of fitnessdate\n fitnessdate = fitnessdate + timedelta(days=1)\n\n## Now calculate fitness (CTL), fatigue (ATL) and form (TSB)\nyesterdayCTL = 0\nyesterdayATL = 0\n\nfor daily in DailyProgress:\n ## DailyProgress = date, start_date_local, name, moving_time, weighted_average_watts, pss(5), intensityfactor(6), CTL(7), ATL(8), TSB(9)\n ## todayCTL = yesterdayCTL + ((PSS - yesterdayCTL) * (1 - exp(-1/42)))\n ## todayATL = yesterdayATL + ((PSS - yesterdayATL) * (1 - exp(-1/7)))\n
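 ## Worked example (illustrative numbers only): with yesterdayCTL = 50 and a ride scoring PSS = 100, CTL creeps up to 50 + 50 * (1 - exp(-1/42)) ≈ 51.2, while ATL reacts much faster: 50 + 50 * (1 - exp(-1/7)) ≈ 56.7, leaving TSB (form) at roughly -5.5.\n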
 todayCTL = yesterdayCTL + (( daily[5] - yesterdayCTL ) * (1 - math.exp( -1/42 )) )\n todayATL = yesterdayATL + (( daily[5] - yesterdayATL ) * (1 - math.exp( -1/7 )) )\n todayTSB = todayCTL - todayATL\n\n ## Now write the new values into current daily\n daily[7] = math.trunc(todayCTL)\n daily[8] = math.trunc(todayATL)\n daily[9] = math.trunc(todayTSB)\n\n ## Now replace yesterday's values with today's values\n yesterdayCTL = todayCTL\n yesterdayATL = todayATL\n if( daily[2] == \"\"):\n print( daily[0],\"##\", \"no activity\",\"##\" , daily[7],\"##\", daily[8],\"##\", daily[9])\n else:\n print( daily[0],\"##\", daily[2],\"##\", daily[7],\"##\", daily[8],\"##\", daily[9])\n","repo_name":"TheGabeMan/StravaStats","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"39627088884","text":"from bs4 import BeautifulSoup\nfrom tqdm import tqdm\nimport requests\nimport datetime\n\n\nclass Anaslo():\n\n def __init__(self, shop_name, year, date, area=\"東京都\"):\n self.shop_name = shop_name\n self.url_host = \"https://ana-slo.com/\"\n self.url_path_from_pref = 
\"ホールデータ/{}/{}-データ一覧/\".format(area, self.shop_name)\n self.date_str = self._get_date(year_string=year, date_string=date)\n self.url_path_from_shop = \"ホールデータ/{}-{}-data/\".format(self.date_str, self.shop_name).replace(\" \", \"\")\n self._log(\"URLの初期化が完了しました。\")\n\n def _get_header(self, mode=\"safari\"):\n header = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/62.0.3202.94 Safari/537.36\"}\n return header\n\n def _get_date(self, year_string, date_string):\n try:\n year = year_string if len(date_string) != 7 else date_string[:4]\n month = date_string[:2] if len(date_string) != 7 else date_string[5:7]\n day = date_string[2:4] if len(date_string) != 7 else date_string[7:8]\n dt = datetime.datetime(year=int(year), month=int(month), day=int(day))\n return dt.strftime(\"%Y-%m-%d\")\n except:\n print(\"[ERROR] 日付変換に失敗しました.\\n\"\n \"MMDDの4桁もしくは YYYYMMDDの8桁の数字形式で入力してください.\")\n exit()\n\n def fetch(self):\n target = self.url_host + self.url_path_from_shop\n raw_data = self._req(target=target)\n data = BeautifulSoup(raw_data.content, \"lxml\")\n tables = data.find(\"table\", class_=\"unit_get_medals_table\") \\\n .find_all(\"td\", class_=\"table_cells\")\n\n # セクション名及び機種名の取得\n sections = {}\n for sect_dom in tqdm(tables):\n sect_id = sect_dom.find(\"a\")[\"href\"].replace(\"#section\", \"\")\n if sect_dom.find(\"a\").text == \"1台設置機種\":\n sect_id = \"variety\"\n sections[sect_id] = sect_dom.find(\"a\").text\n self._log(\"機種一覧の取得に成功しました。\")\n\n rows = []\n first_row = [\"日付\", \"機種名\", \"台番号\", \"G数\", \"差枚\", \"BB\", \"RB\", \"合成確率\", \"BB確率\", \"RB確率\"]\n rows.append(first_row)\n for section_id, section_name in tqdm(sections.items()):\n section_datas_table = data.find(\"div\", id=\"tab01_{}\".format(section_id))\n if section_id != \"variety\":\n # 1台設置以外\n section_datas = section_datas_table.find_all(\"tr\")[1:]\n for section_data in section_datas:\n data_doms = section_data.find_all(\"td\", class_=\"table_cells\")\n datas = [dom.text.replace(',', '') for dom in data_doms]\n row = [self.date_str, section_name]\n row.extend(datas)\n rows.append(row)\n else:\n # 一台設置\n section_datas = section_datas_table.find_all(\"tr\")[1:]\n for section_data in section_datas:\n data_doms = section_data.find_all(\"td\")\n datas = [dom.text.replace(',', '') for dom in data_doms]\n row = [self.date_str]\n row.extend(datas)\n rows.append(row)\n self._log(\"全機種データの処理に成功しました。\")\n\n return rows\n\n def _req(self, target):\n raw_data = requests.get(url=target, headers=self._get_header())\n self._log(\"ページのリクエストに成功しました。\")\n return raw_data\n\n def _log(self, msg):\n print(\"[正常] {}\".format(msg))\n\n def to_csv(self, rows):\n path = \"./files/{}-{}.csv\".format(self.date_str, self.shop_name)\n with open(path, mode='w', encoding=\"utf_8_sig\") as f:\n for row in rows:\n f.write(','.join(row) + \"\\n\")\n self._log(\"ファイルの作成が完了しました。ファイル名:{}\".format(path.split(\"/\")[-1]))","repo_name":"XXXalice/slot","sub_path":"src/anaslo.py","file_name":"anaslo.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39627088884","text":"import pandas as pd\nimport numpy as np\nfrom dataclasses import make_dataclass\nimport matplotlib.pyplot as plt\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n \n def __repr__(self):\n return f'({self.x}, {self.y})'\n \npoints = pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 
3)])\nprint(points)\n\n\nx_values = points.apply(lambda row: row[0].x, axis=1)\ny_values = points.apply(lambda row: row[0].y, axis=1)\n\nplt.plot(x_values, y_values, 'o') # 'o' for marker style, you can choose a different marker if desired\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Plot of Points')\nplt.show()\n\nPoint = make_dataclass(\"Point\", [(\"x\", int), (\"y\", int)])\ndf = pd.DataFrame([Point(3, 1), Point(0, 3), Point(2, 3)])\nprint(df)\n\nplt.scatter(df['x'], df['y'], color='red')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Plot of Points')\nplt.show()","repo_name":"jwang1122/python","sub_path":"src/pandas/create03.py","file_name":"create03.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71310641224","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport unittest\nimport sys\nimport datetime\nimport os\n\nPY3 = sys.version_info[0] == 3\n\n#This module must be both python2 and python3 compatible\n\n\nclass MaxMinSumToken:\n def __init__(self, value):\n self.mad_value = value\n\n\ndef MAX(value):\n return MaxMinSumToken(value)\n\n\ndef MIN(value):\n return MaxMinSumToken(value)\n\n\ndef SUM(value):\n return MaxMinSumToken(value)\n\n\n# <<<< COPYPASTE FROM \"mad_max.py\"\n#####################################\n#####################################\n# This is to ensure that \"mad_max.py\" file has exactly the same content as this fragment. This condition will be ensured by test_mad_max.py\n# To edit this code you need to simultaneously edit this fragment and content of mad_max.py, otherwise test_mad_max.py will fail.\n\nbuiltin_max = max\nbuiltin_min = min\nbuiltin_sum = sum\n\n\ndef max(*args, **kwargs):\n single_arg = len(args) == 1 and not kwargs\n if single_arg:\n if PY3 and isinstance(args[0], str):\n return MAX(args[0])\n if not PY3 and isinstance(args[0], basestring):\n return MAX(args[0])\n if isinstance(args[0], int) or isinstance(args[0], float):\n return MAX(args[0])\n try:\n return builtin_max(*args, **kwargs)\n except TypeError:\n if single_arg:\n return MAX(args[0])\n raise\n\n\ndef min(*args, **kwargs):\n single_arg = len(args) == 1 and not kwargs\n if single_arg:\n if PY3 and isinstance(args[0], str):\n return MIN(args[0])\n if not PY3 and isinstance(args[0], basestring):\n return MIN(args[0])\n if isinstance(args[0], int) or isinstance(args[0], float):\n return MIN(args[0])\n try:\n return builtin_min(*args, **kwargs)\n except TypeError:\n if single_arg:\n return MIN(args[0])\n raise\n\n\ndef sum(*args):\n try:\n return builtin_sum(*args)\n except TypeError:\n if len(args) == 1:\n return SUM(args[0])\n raise\n\n#####################################\n#####################################\n# >>>> COPYPASTE END\n\n\n\ndef read_file(file_path):\n with open(file_path) as f:\n return f.read()\n\n\nclass TestMadMax(unittest.TestCase):\n\n def test_mad_max(self):\n now = datetime.datetime.now()\n self.assertTrue(max(7).mad_value == 7)\n self.assertTrue(max(None).mad_value == None)\n self.assertTrue(max(now).mad_value == now)\n self.assertTrue(max('hello').mad_value == 'hello')\n self.assertTrue(max(0.6).mad_value == 0.6)\n self.assertTrue(max(4, 6) == 6)\n self.assertTrue(max(4, 8, 6) == 8)\n self.assertTrue(max(4, 8, 6, key=lambda v: -v) == 4)\n if PY3:\n self.assertTrue(max([], default=7) == 7)\n self.assertTrue(max(['b', 'x', 'a'], default='m') == 'x')\n with self.assertRaises(TypeError) as cm:\n 
max(7, key=lambda v: v)\n e = cm.exception\n self.assertTrue(str(e).find('object is not iterable') != -1)\n\n\n def test_mad_min(self):\n now = datetime.datetime.now()\n self.assertTrue(min(7).mad_value == 7)\n self.assertTrue(min(None).mad_value == None)\n self.assertTrue(min(now).mad_value == now)\n self.assertTrue(min('hello').mad_value == 'hello')\n self.assertTrue(min(0.6).mad_value == 0.6)\n self.assertTrue(min(4, 6) == 4)\n self.assertTrue(min(4, 8, 6) == 4)\n self.assertTrue(min(4, 8, 6, key=lambda v: -v) == 8)\n if PY3:\n self.assertTrue(min([], default=7) == 7)\n self.assertTrue(min(['b', 'x', 'a'], default='m') == 'a')\n with self.assertRaises(TypeError) as cm:\n min(7, key=lambda v: v)\n e = cm.exception\n self.assertTrue(str(e).find('object is not iterable') != -1)\n\n\n def test_mad_sum(self):\n now = datetime.datetime.now()\n self.assertTrue(sum(7).mad_value == 7)\n self.assertTrue(sum(None).mad_value == None)\n self.assertTrue(sum(now).mad_value == now)\n self.assertTrue(sum([1, 2, 3]) == 6)\n self.assertTrue(sum([1, 2, 3], 2) == 8)\n self.assertTrue(sum('hello').mad_value == 'hello')\n with self.assertRaises(TypeError) as cm:\n sum(7, 8)\n\n\n def test_mad_source(self):\n this_file_path = os.path.realpath(__file__.rstrip('c'))\n test_dir_path = os.path.dirname(this_file_path)\n rbql_dir_path = os.path.dirname(test_dir_path)\n mad_max_path = os.path.join(test_dir_path, 'mad_max.py')\n template_path = os.path.join(rbql_dir_path, 'rbql', 'engine', 'template.py')\n original_data = read_file(mad_max_path)\n this_data = read_file(this_file_path)\n template_data = read_file(template_path)\n assert original_data.find('COPYPASTE') != -1\n assert this_data.find(original_data) != -1\n assert template_data.find(original_data) != -1\n","repo_name":"yrlihuan/LinuxConf","sub_path":".vim/rbql_core/test/test_mad_max.py","file_name":"test_mad_max.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13853553380","text":"import subprocess\nimport openai\nimport click\nimport os\nimport time\nimport threading\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\nactions = [\"TALK_TO_USER\", \"RUN_SHELL_COMMAND\", \"THINK\", \"READ_FILES\", \"WRITE_FILE\"]\n\ndef talk_to_user(rational, message):\n print(\"Iga's thoughts: \" + rational)\n print(\"Iga: \" + message)\n\ndef run_shell_command(rational, command):\n print(\"Iga's thoughts: \" + rational)\n print(\"Iga: Run command: \" + command)\n result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, text=True)\n response = result.stdout.strip() if result.stdout.strip() else \"EMPTY\"\n print(response)\n if(response):\n return response\n else:\n return \"EMPTY\"\n\ndef think(rationale, prompt):\n print(\"Iga's thoughts: \" + rationale)\n print(\"Iga's thoughts: \" + prompt)\n return \"NEXT_ACTION\"\n\ndef read_files(rational, paths):\n print(\"Iga's thoughts: \" + rational)\n print(\"Iga: Reading files: \" + paths)\n files = paths.split(\"\\n\")\n files = [file for file in files if file]\n content = \"\"\n for file in files:\n content += file + '\\n'\n content += get_file(file) + '\\n'\n print(content)\n return content\n\ndef write_file(rational, contents):\n print(contents)\n print(\"Iga's thoughts: \" + rational)\n path, content = contents.split(\"\\n\", 1)\n print(\"Iga: Writing file:\" + path)\n print(content)\n with open(path, 'w') as file:\n 
file.write(content)\n return \"NEXT_ACTION\"\n\ndef get_file(path):\n with open(path, 'r') as file:\n content = file.read()\n return content\n\ndef parse_response(response):\n lines = response.split(\"\\n\")\n current_key = ''\n rationale = ''\n action = ''\n content = ''\n firstActionFound = False\n firstRationaleFound = False\n for line in lines:\n if line.startswith(\"RATIONALE\") and not firstRationaleFound:\n current_key = \"RATIONALE\"\n firstRationaleFound = True\n elif line.startswith(tuple(actions)) and not firstActionFound:\n current_key = line\n action = line\n firstActionFound = True\n elif current_key == \"RATIONALE\":\n rationale += line + \"\\n\"\n elif current_key in actions:\n content += line + '\\n'\n # Remove the last newline of the content if it's empty\n if content.endswith(\"\\n\"):\n content = content[:-1]\n return {\"action\": action, \"rationale\": rationale, \"content\": content, \"response_raw\": response}\n\ndef process_message(messages):\n try:\n response = openai.ChatCompletion.create(\n model=\"gpt-4\",\n messages=messages,\n max_tokens=2048,\n temperature=0.2,\n )\n\n generated_response = response.choices[0]['message']['content'].strip()\n parsed_response = parse_response(generated_response)\n parsed_response[\"success\"] = True\n return parsed_response\n\n except openai.OpenAIError as error:\n print(f\"An error occurred while calling the OpenAI API: {error}\")\n except ValueError as error:\n print(f\"An error occurred while parsing the response: {error}\")\n except Exception as error:\n print(f\"An unexpected error occurred: {error}\")\n\n return {\"success\": False}\n\n\ndef handle_action(messages):\n response_data = process_message(messages)\n if response_data[\"success\"]:\n messages.append({\"role\": \"assistant\", \"content\": response_data[\"response_raw\"]})\n print(messages)\n action = response_data[\"action\"]\n rationale = response_data[\"rationale\"]\n content = response_data[\"content\"]\n\n if action == \"TALK_TO_USER\":\n print(\"\") # Give some space\n talk_to_user(rationale, content)\n elif action == \"RUN_SHELL_COMMAND\":\n next_message = run_shell_command(rationale, content)\n messages.append({\"role\": \"user\", \"content\": next_message})\n messages = handle_action(messages)\n elif action == \"THINK\":\n next_message = think(rationale, content)\n messages.append({\"role\": \"user\", \"content\": next_message})\n messages = handle_action(messages)\n elif action == \"READ_FILES\":\n next_message = read_files(rationale, content)\n messages.append({\"role\": \"user\", \"content\": next_message})\n messages = handle_action(messages)\n elif action == \"WRITE_FILE\":\n next_message = write_file(rationale, content)\n messages.append({\"role\": \"user\", \"content\": next_message})\n messages = handle_action(messages)\n else:\n # If it fails, assume they're talking to the user\n talk_to_user(\"\", response_data[\"response_raw\"])\n else:\n print(\"Failed to process the message. Please try again.\")\n\n return messages\n\n@click.command()\ndef chat_cli():\n messages = [{\"role\": \"system\", \"content\": get_file(\"system_instructions.txt\")}]\n\n # Add a message from Iga as a welcome message\n welcome_message = \"Hello! I'm Iga, your personal AI assistant. 
How can I help you today?\"\n    messages.append({\"role\": \"assistant\", \"content\": welcome_message})\n    print(\"Iga: \" + welcome_message)\n\n    while True:\n        user_input = input(\"User: \")\n        messages.append({\"role\": \"user\", \"content\": user_input})\n        handle_action(messages)\n\nif __name__ == \"__main__\":\n    chat_cli()\n\n","repo_name":"dennishansen/iga","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"81"}
{"seq_id":"12554156773","text":"import requests\nfrom PIL import Image\nimport io\nfrom time import sleep\nimport os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\n\n\nTRAIN_PATH = \"D:/Development/Classify/classify_dataset/train/\"\nTEST_PATH = \"D:/Development/Classify/classify_dataset/test/\"\n\n\ndef createFolder(path, folder):\n    dirPath = os.path.join(path, folder)\n    os.mkdir(dirPath)\n    return dirPath\n\n\ndef saveImage(dirPath, index, content):\n    image_file = io.BytesIO(content)\n    image = Image.open(image_file).convert('RGB')\n    path = dirPath + '/' + str(index) + '.jpg'\n    with open(path, 'wb') as f:\n        image.save(f, \"JPEG\", quality=85)\n\n\ndef downloadPictures(browser, name):\n    searchBar = WebDriverWait(browser, 10).until(\n        EC.presence_of_element_located(\n            (By.XPATH, '//input[@id=\"search_form_input\"]'))\n    )\n    searchBar.clear()\n    searchBar.send_keys(name + ' pictures')\n    searchBar.send_keys(Keys.ENTER)\n\n    browser.execute_script(\"window.scrollTo({top: 0, behavior: 'smooth'});\")\n    sleep(5)\n    browser.execute_script(\n        \"window.scrollTo({top: 50000, behavior: 'smooth'});\")\n    sleep(5)\n\n    imageContainers = browser.find_elements_by_xpath(\n        '//div[@class=\"tile tile--img has-detail\"]')\n\n    trainPath = createFolder(\n        \"D:/Development/Classify/classify_dataset/train/\", name)\n    print(trainPath)\n    testPath = createFolder(\n        \"D:/Development/Classify/classify_dataset/test/\", name)\n    print(testPath)\n\n    i = 0\n    for container in imageContainers:\n        try:\n            url = container.find_element_by_xpath(\n                './/img').get_attribute('src')\n            print(\"Image \", i, \" url:\", url)\n            imageContent = requests.get(url).content\n            # Save the first 150 images for training, the next 50 for testing\n            if (i < 150):\n                saveImage(trainPath, i, imageContent)\n            elif(i >= 150 and i < 200):\n                saveImage(testPath, i, imageContent)\n        except:\n            print('Error')\n\n        i += 1\n\n\nopts = Options()\nopts.add_argument(\n    'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.102')\n\nbrowser = webdriver.Chrome(\n    'D:/Development/Classify/bot/drivers/chromedriver.exe', options=opts)\n\nbrowser.get('https://duckduckgo.com/')\n\n\nmainSearchBar = WebDriverWait(browser, 10).until(\n    EC.presence_of_element_located(\n        (By.XPATH, '//input[@id=\"search_form_input_homepage\"]'))\n)\nmainSearchBar.send_keys('images')\n\nmainSearchButton = browser.find_element_by_xpath(\n    '//input[@id=\"search_button_homepage\"]')\nmainSearchButton.click()\n\nimagesSection = WebDriverWait(browser, 10).until(\n    EC.presence_of_element_located(\n        (By.XPATH, '//a[@data-zci-link=\"images\"]'))\n)\nimagesSection.click()\n\ndownloadPictures(browser, 
\"dog\")\ndownloadPictures(browser, \"cat\")\ndownloadPictures(browser, \"lion\")\n","repo_name":"EdwinLovo/Classify","sub_path":"bot/pictureDownloader.py","file_name":"pictureDownloader.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74237749386","text":"from __future__ import unicode_literals\n\nimport hashlib\nimport json\n\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.utils.crypto import get_random_string\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom EnterpriseResourcePlanning import conf\nfrom EnterpriseResourcePlanning.conf import email_sending_service_enabled\nfrom General.models import StudentDetail, Division, FacultySubject\nfrom Registration.models import Faculty, Student, Branch\nfrom Timetable.models import Timetable\nfrom Roles.models import RoleManager\nimport json\n\n\ndef login_user(request):\n if request.method == \"POST\":\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return HttpResponseRedirect('/dashboard/')\n else:\n return render(request, 'login.html', {'error': 'Invalid credentials'})\n elif request.method == \"GET\":\n user = request.user\n if user.is_anonymous:\n return render(request, 'login.html')\n else:\n return HttpResponseRedirect('/dashboard/')\n\n return render(request, 'login.html')\n\n\n@csrf_exempt\ndef android_login(request):\n response = {\n 'user_type': 'null'\n }\n if request.method == 'POST':\n try:\n print(request.POST)\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user:\n\n user = User.objects.get(username=username)\n # print(user.role)\n is_faculty = RoleManager.objects.filter(user=user, role__role='faculty')\n is_student = RoleManager.objects.filter(user=user, role__role='student')\n if is_faculty:\n faculty = user.faculty\n faculty_response = {\n 'user_type': 'Faculty',\n 'initials': user.faculty.initials,\n 'name': user.faculty.first_name + user.faculty.last_name,\n }\n\n # Add subjects taught by faculty division-wise\n\n theory = FacultySubject.objects.filter(faculty=user.faculty, subject__is_practical=False, is_active=True)\n practical = FacultySubject.objects.filter(faculty=user.faculty, subject__is_practical=True, is_active=True)\n subject_json = {}\n theory_json = {}\n practical_json = {}\n for each in theory:\n if each.division is not None:\n theory_json[each.division.division] = []\n for each in theory:\n if each.division is not None:\n theory_json[each.division.division] += [each.subject.short_form]\n for each in practical:\n if each.division is not None:\n practical_json[each.division.division] = []\n for each in practical:\n if each.division is not None:\n practical_json[each.division.division] += [each.subject.short_form]\n\n subject_json['theory'] = theory_json\n subject_json['practical'] = practical_json\n faculty_response['subjects'] = subject_json\n print(\"Subject JSON\", subject_json)\n\n # return HttpResponse(str(faculty_response))\n all_divisions = Division.objects.filter().all()\n\n attendance_list = {}\n\n for each_division in all_divisions:\n\n 
all_student = [each.student for each in\n StudentDetail.objects.filter(batch__division=each_division).distinct()]\n\n year = each_division.year_branch.year.year\n\n division = each_division.division\n branch = each_division.year_branch.branch.branch\n\n if year in attendance_list:\n\n if branch in attendance_list[year]:\n if division in attendance_list[year][branch]:\n pass\n else:\n attendance_list[year][branch][division] = {}\n else:\n attendance_list[year][branch] = {}\n attendance_list[year][branch][division] = {}\n\n else:\n attendance_list[year] = {}\n attendance_list[year][branch] = {}\n attendance_list[year][branch][division] = {}\n # attendance_list[year][branch][division] = sorted([\n # StudentRollNumber.objects.get(student=each_student.student, is_active=True) for\n # each_student in all_student])\n\n for each_student in all_student:\n\n roll_number = StudentDetail.objects.get(student=each_student,\n is_active=True).roll_number\n\n if 'all' in attendance_list[year][branch][division]:\n attendance_list[year][branch][division]['all'].append(roll_number)\n else:\n attendance_list[year][branch][division]['all'] = []\n attendance_list[year][branch][division]['all'].append(roll_number)\n curr_batch = StudentDetail.objects.get(student=each_student, is_active=True).batch.batch_name\n if curr_batch in attendance_list[year][branch][division]:\n attendance_list[year][branch][division][curr_batch].append(roll_number)\n else:\n attendance_list[year][branch][division][curr_batch] = []\n attendance_list[year][branch][division][curr_batch].append(roll_number)\n\n # attendance_list[year][branch][division] = {}\n #\n # for each_student in all_student:\n # attendance_list[year][branch][division].appned(\n # StudentRollNumber.objects.get(student=each_student, is_active=True).roll_number)\n\n faculty_response['attendance_list'] = attendance_list\n print(HttpResponse(json.dumps(faculty_response)))\n return HttpResponse(json.dumps(faculty_response))\n elif is_student:\n student = user.student\n student_detail = StudentDetail.objects.get(student=student, is_active=True)\n\n student_response = {\n 'user_type': 'Student',\n 'year': student_detail.batch.division.year_branch.year.year,\n 'branch': student_detail.batch.division.year_branch.branch.branch,\n 'division': student_detail.batch.division.division,\n 'batch': student_detail.batch.batch_name,\n 'name': user.student.first_name + user.student.last_name\n }\n print(student_response)\n return JsonResponse(student_response)\n else:\n return JsonResponse(response)\n\n except Exception as ex:\n template = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n\n return JsonResponse(message)\n else:\n return render(request, 'login.html')\n\n\ndef generate_activation_key():\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789'\n secret_key = get_random_string(20, chars)\n\n return hashlib.sha256((secret_key).encode('utf-8')).hexdigest()","repo_name":"akzarma/EnterpriseResourcePlanning","sub_path":"Login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70323978505","text":"import abc\n\n\nclass Calculation(abc.ABC):\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n @abc.abstractmethod\n def calculate(self):\n pass\n\n\nclass Addition(Calculation):\n def calculate(self):\n sum = []\n for i in range(0, len(self.a)):\n sum.append(self.a[i]+self.b[i])\n print(\"Sum of \", self.a, \" and \", self.b, \" is \", sum)\n\n\nclass Subtraction(Calculation):\n def calculate(self):\n diff = []\n for i in range(0, len(self.a)):\n diff.append(self.a[i]-self.b[i])\n print(\"Difference of \", self.a, \" and \", self.b, \" is \", diff)\n\n\nclass Multiplication(Calculation):\n def calculate(self):\n prod = []\n for i in range(0, len(self.a)):\n prod.append(self.a[i]*self.b[i])\n print(\"Product of \", self.a, \" and \", self.b, \" is \", prod)\n\n\nclass Division(Calculation):\n def calculate(self):\n quot = []\n for i in range(0, len(self.a)):\n quot.append(self.a[i]/self.b[i])\n print(\"Quotient of \", self.a, \" and \", self.b, \" is \", quot)\n\n\nl = int(input(\"Enter number of elements in list : \"))\np = []\nq = []\nprint(\"Enter list1 \")\nfor i in range(0, l):\n p.append(int(input()))\nprint(\"Enter list2 \")\nfor i in range(0, l):\n q.append(int(input()))\nprint(\"List 1 : \", p, \" \\t List 2 : \", q)\nwhile(True):\n print(\"*******Operations******** \\n1.Addition \\n2.Subtraction \\n3.Mul\\n4.Div\\n5.Exit\")\n ch = int(input(\"Enter choice : \"))\n if ch == 1:\n Addition(p, q).calculate()\n\n elif ch == 2:\n Subtraction(p, q).calculate()\n\n elif ch == 3:\n Multiplication(p, q).calculate()\n\n elif ch == 4:\n Division(p, q).calculate()\n\n elif ch == 5:\n exit()\n else:\n print(\"Invalid choice!\")\n","repo_name":"gnsaddy/RVCE","sub_path":"ThirdSemester/Advance_oops/fromServer/LabE4.py","file_name":"LabE4.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6408875980","text":"import numpy as np\nimport pandas as pd\nfrom keras.models import Sequential,Model\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import SGD\nfrom keras.models import load_model\nfrom keras.utils import np_utils\nfrom keras.callbacks import TensorBoard\nfrom keras.utils.vis_utils import plot_model\nfrom sklearn.model_selection import train_test_split\nfrom showimg import LossHistory\nfrom do_model import test_pre\n\n\ntest_train_data = np.load('data/counts1.npy')\naims = pd.read_csv('data/mydata.csv')\n\ndef get_best(num):\n # history = LossHistory()\n\n tensorboard = TensorBoard(log_dir='log/')\n callback_lists = [tensorboard] #因为callback是list型,必须转化为list\n\n # print(aims.aims)\n x_train, x_test, y_train, y_test = train_test_split(test_train_data,aims.aims,test_size=0.10)\n y_train, y_test = y_train.tolist(),y_test.tolist()\n y_train = np_utils.to_categorical(y_train,num_classes=2)\n y_test = np_utils.to_categorical(y_test,num_classes=2)\n # 
print(x_train[0])\n\n    # Model\n    model = Sequential()\n    # Layers to fit the training data\n    model.add(Dense(units=16,input_dim=46,bias_initializer='one',activation='sigmoid',name=\"Dense_1\"))\n    # bias_initializer='one',activation='tanh'\n\n\n    model.add(Dropout(0.2))\n    model.add(Dense(units=8,activation='relu'))#activation='relu'\n    model.add(Dropout(0.2))\n    model.add(Dense(units=2,activation='softmax'))\n\n    # model.summary()\n\n\n    # Use cross-entropy as the loss function\n    sgd = SGD(lr=0.003)\n    model.compile(loss='categorical_crossentropy',#'binary_crossentropy''categorical_crossentropy'\n                  optimizer='rmsprop', # sgd, # 'rmsprop'\n                  metrics=['accuracy'])\n    # Fit the model\n    model.fit(x_train, y_train, epochs=200, batch_size=25,callbacks=callback_lists)#verbose='True',callbacks=callback_lists)# callbacks=[history])\n\n\n    loss,accuracy = model.evaluate(x_train, y_train)#(x_train, y_train)#(x_test,y_test)\n    print('loss:',loss)\n    print('accuracy:',accuracy)\n    loss1,accuracy1 = model.evaluate(x_test,y_test)\n    print('loss:',loss1)\n    print('accuracy:',accuracy1)\n\n\n    plot_model(model, to_file='model1.png',show_shapes=True)\n    # history.loss_plot('epoch')\n    model.save('test_models/model{}.h5'.format(str(num)))\n    logs = open('test_models/logs.txt','a')\n    logs.write(str(num) + '==' + str(accuracy1) + '||' + str(accuracy) + '||' + str(loss1) + '||' + str(loss) + '--------------\\n')\n\ndef train_best(model0,epoch=200,num=1):\n    tensorboard = TensorBoard(log_dir='log/')\n    callback_lists = [tensorboard] # callbacks must be passed as a list\n\n    x_train, x_test, y_train, y_test = train_test_split(test_train_data,aims.aims,test_size=0.10)\n    y_train, y_test = y_train.tolist(),y_test.tolist()\n    y_train = np_utils.to_categorical(y_train,num_classes=2)\n    y_test = np_utils.to_categorical(y_test,num_classes=2)\n\n    # Training finished, load the saved model\n    model = load_model(model0)\n\n    # Use cross-entropy as the loss function\n    sgd = SGD(lr=0.003)\n    model.compile(loss='binary_crossentropy',#'binary_crossentropy''categorical_crossentropy'\n                  optimizer='rmsprop', # sgd, # 'rmsprop'\n                  metrics=['accuracy'])\n    # Fit the model\n    model.fit(x_train, y_train, epochs=epoch, batch_size=25,callbacks=callback_lists)#verbose='True',callbacks=callback_lists)# callbacks=[history])\n\n    loss,accuracy = model.evaluate(x_train, y_train)#(x_train, y_train)#(x_test,y_test)\n    print('loss:',loss)\n    print('accuracy:',accuracy)\n    loss1,accuracy1 = model.evaluate(x_test,y_test)\n    print('loss:',loss1)\n    print('accuracy:',accuracy1)\n\n\n    model.save('train_models/{}.h5'.format(str(num)))\n    # logs = open('test_models/logs.txt','a')\n    # logs.write(str(num) + '==' + str(accuracy1) + '||' + str(accuracy) + '||' + str(loss1) + '||' + str(loss) + '--------------\\n')\n\n# for i in range(10):\n#     get_best(i)\n    # train_best(model0='test_models/model10.h5',epoch=200,num=i)\n\nfor i in range(10):\n    test_pre('test_models/model{}.h5'.format(str(i)))\n    print('---'*10)","repo_name":"aboutmydreams/MathAmino","sub_path":"best.py","file_name":"best.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73909985226","text":"import torch\nfrom preprocessing.tokenizer import Tokenizer\nfrom trainer import GPTTrainer, activation_functions_dict\nimport re\nimport numpy as np\nimport onnxruntime as ort\nimport time\n\ndef onnx_response(tokenizer_path: str, checkpoint: str, device: str, max_ctx: int):\n    tokenizer = Tokenizer(tokenizer_path)\n    end_token = tokenizer.get_special_token(\"end\")\n    load_start_time = time.time()\n    providers = ['CPUExecutionProvider']\n    if device != \"cpu\" and ort.get_device() == 
'GPU':\n        providers = ['CUDAExecutionProvider'] + providers\n\n    onnx_session = ort.InferenceSession(checkpoint, providers=providers)\n    load_end_time = time.time()\n    print(f\"Total loading time: {load_end_time - load_start_time}\")\n\n    while True:\n        text_input = input(\"Message: \")\n        \n        digits = tokenizer.text_to_sequences([text_input], start_token=True, sep_token=True)\n\n        message_length = digits.shape[1]\n\n        infer_start_time = time.time()\n        for _ in range(max_ctx):\n            output = onnx_session.run(\n                None,\n                {'input': digits}\n            )[0]\n\n            pred_token = np.argmax(output[:, -1, :], axis=-1)\n            if pred_token == end_token:\n                break\n\n            digits = np.concatenate((digits, np.expand_dims(pred_token, axis=0)), axis=-1)\n        infer_end_time = time.time()\n        \n        words = tokenizer.decode(digits[0][message_length:])\n        response = \"\"\n        \n        upper_flag = False\n        upper_all_flag = False\n        for word in words:\n            if word in tokenizer.special_tokens:\n                if word == \"\":\n                    upper_flag = True\n                elif word == \"\":\n                    upper_all_flag = True\n                elif word == \"\":\n                    response += \"\\n\"\n                continue\n            else:\n                if upper_flag:\n                    upper_flag = False\n                    response += str(word).capitalize()\n                elif upper_all_flag:\n                    upper_all_flag = False\n                    response += str(word).upper()\n                else:\n                    response += word\n\n        response = re.sub(tokenizer.word_break_icon, \" \", response).capitalize()\n        \n        print(f\"Response:\\n{response}\")\n        print(f\"Total inference time: {infer_end_time - infer_start_time}\")\n\n        exit = input('Do you want to exit? (y/n): ').lower().strip() == 'y'\n\n        if exit:\n            break\n\ndef jit_response(tokenizer_path: str, checkpoint: str, device: str, max_ctx: int):\n    tokenizer = Tokenizer(tokenizer_path)\n    end_token = tokenizer.get_special_token(\"end\")\n\n    print(device)\n\n    load_start_time = time.time()\n    model = torch.jit.load(checkpoint)\n    model.eval()\n    model.to(device)\n    load_end_time = time.time()\n\n    print(f\"Total loading time: {load_end_time - load_start_time}\")\n\n    while True:\n        text_input = input(\"Message: \")\n        infer_start_time = time.time()\n        digits = tokenizer.text_to_sequences([text_input], start_token=True, sep_token=True)\n        digits = torch.tensor(digits).to(device)\n\n        message_length = digits.shape[1]\n        \n        for _ in range(max_ctx):\n            output = model(digits)\n\n            _, pred_token = torch.max(output[:, -1, :], dim=-1)\n\n            if pred_token == end_token:\n                break\n\n            digits = torch.concatenate((digits, pred_token.unsqueeze(0)), dim=-1)\n\n        infer_end_time = time.time()\n\n        response = tokenizer.decode(digits[0][message_length:])\n\n        print(f\"Response:\\n{response}\")\n        print(f\"Total inference time: {infer_end_time - infer_start_time}\")\n\n        exit = input('Do you want to exit? 
(y/n): ').lower().strip() == 'y'\n\n        if exit:\n            break\n\n\ndef trainer_response(\n    n:int,\n    d_model: int,\n    heads: int,\n    d_ff: int,\n    eps: float,\n    activation,\n    dropout_rate: float,\n    tokenizer_path: str, checkpoint: str, device: str, max_ctx: int\n    ):\n    tokenizer = Tokenizer(tokenizer_path)\n    \n    load_start_time = time.time()\n    trainer = GPTTrainer(\n        token_size=len(tokenizer.dictionary),\n        n=n,\n        heads=heads,\n        d_ff=d_ff,\n        dropout_rate=dropout_rate,\n        eps=eps,\n        activation=activation,\n\n        checkpoint=checkpoint,\n        device=device\n    )\n    load_end_time = time.time()\n    print(f\"Total loading time: {load_end_time - load_start_time}\")\n\n    while True:\n        text_input = input(\"Message: \")\n        infer_start_time = time.time()\n        digits = tokenizer.text_to_sequences([text_input], start_token=True, sep_token=True)\n        digits = torch.tensor(digits).to(device)\n\n        message_length = digits.shape[1]\n        digits_output = trainer.generate(digits, max_ctx, tokenizer.get_special_token(\"end\"))\n        infer_end_time = time.time()\n        response = tokenizer.decode(digits_output[0][message_length:])\n\n        print(f\"Response: {response}\")\n        print(f\"Total inference time: {infer_end_time - infer_start_time}\")\n\n        exit = input('Do you want to exit? (y/n): ').lower().strip() == 'y'\n\n        if exit:\n            break\n\nif __name__ == '__main__':\n    from argparse import ArgumentParser\n\n    parser = ArgumentParser()\n\n    # Model Hyper-parameters\n    parser.add_argument(\"--n\", type=int, default=12)\n    parser.add_argument(\"--d_model\", type=int, default=768)\n    parser.add_argument(\"--heads\", type=int, default=12)\n    parser.add_argument(\"--d_ff\", type=int, default=3072)\n    parser.add_argument(\"--dropout_rate\", type=float, default=0.1)\n    parser.add_argument(\"--eps\", type=float, default=0.02)\n    parser.add_argument(\"--activation\", type=str, default='gelu')\n\n    parser.add_argument(\"--tokenizer_path\", type=str)\n    parser.add_argument(\"--checkpoint\", type=str)\n    parser.add_argument(\"--device\",type=str, default='cpu')\n    parser.add_argument(\"--max_ctx\", type=int, default=250)\n    parser.add_argument(\"--model_type\", default='trainer')\n\n    args = parser.parse_args()\n\n    if args.model_type == 'trainer':\n        print(\"Trainer Mode\")\n        trainer_response(\n            n=args.n,\n            d_model=args.d_model,\n            heads=args.heads,\n            d_ff=args.d_ff,\n            eps=args.eps,\n            activation=activation_functions_dict[args.activation],\n            dropout_rate=args.dropout_rate,\n            tokenizer_path=args.tokenizer_path,\n            checkpoint=args.checkpoint,\n            device=args.device,\n            max_ctx=args.max_ctx\n        )\n    elif args.model_type == 'onnx':\n        print(\"ONNX Mode\")\n        onnx_response(\n            tokenizer_path=args.tokenizer_path,\n            checkpoint=args.checkpoint,\n            device=args.device,\n            max_ctx=args.max_ctx\n        )\n    else:\n        print(\"TorchScript Mode\")\n        jit_response(\n            tokenizer_path=args.tokenizer_path,\n            checkpoint=args.checkpoint,\n            device=args.device,\n            max_ctx=args.max_ctx\n        )","repo_name":"Alan-404/GPT","sub_path":"cmd_chat.py","file_name":"cmd_chat.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"33342721568","text":"from typing import Optional, Dict, List, Union\n\nfrom deeplake.core.dataset import Dataset\n\n\nclass TensorStructure:\n    \"\"\"Contains the name and necessary parameters of a tensor to be created in a dataset.\"\"\"\n\n    def __init__(\n        self,\n        name: str,\n        params: Optional[Dict] = None,\n    ) -> None:\n        self.name = name\n        self.params = params if params is not None else dict()\n\n    def create(self, ds: Dataset):\n        
ds.create_tensor(self.name, **self.params)\n\n def create_missing(self, ds: Dataset):\n if self.name not in ds.tensors:\n self.create(ds)\n\n\nclass GroupStructure:\n \"\"\"Represents a group in a dataset, containing a list of TensorStructure and nested GroupStructure objects.\"\"\"\n\n def __init__(\n self,\n name: str,\n items: Optional[List[Union[TensorStructure, \"GroupStructure\"]]] = None,\n ):\n self.name = name\n self.items = items if items is not None else []\n\n @property\n def groups(self):\n return [g for g in self.items if isinstance(g, GroupStructure)]\n\n @property\n def tensors(self):\n return [t for t in self.items if isinstance(t, TensorStructure)]\n\n @property\n def all_keys(self):\n keys = set([f\"{self.name}/{t.name}\" for t in self.tensors])\n for g in self.groups:\n keys.update([f\"{self.name}/{k}\" for k in g.all_keys])\n\n return keys\n\n def add_item(self, item: Union[TensorStructure, \"GroupStructure\"]):\n self.items.append(item)\n\n def create(self, ds: Dataset):\n ds.create_group(self.name)\n\n for item in self.items:\n item.create(ds=ds[self.name])\n\n def create_missing(self, ds: Dataset):\n if self.name not in ds.groups:\n ds.create_group(self.name)\n\n for item in self.items:\n item.create_missing(ds=ds[self.name])\n\n\nclass DatasetStructure:\n \"\"\"\n Represents a collection of TensorStructure and GroupStructure objects, forming\n the structure of a dataset parsed by an ingestion template.\n\n Supports adding items, creating the tensors and groups (or only the missing ones) in a given Dataset object.\n \"\"\"\n\n def __init__(\n self,\n structure: Optional[List[Union[TensorStructure, GroupStructure]]] = None,\n ignore_one_group: bool = False,\n ) -> None:\n \"\"\"Creates a new DatasetStructure object.\n\n Args:\n structure: An initial list of TensorStructure and GroupStructure objects.\n ignore_one_group: If True, the structure will be flattened if it contains only one group.\n \"\"\"\n self.structure = structure if structure is not None else []\n self.ignore_one_group = ignore_one_group\n\n def __getitem__(self, key):\n try:\n return [i for i in self.structure if i.name == key][0]\n except IndexError:\n raise KeyError(f\"Key {key} not found in structure.\")\n\n def add_first_level_tensor(self, tensor: TensorStructure):\n self.structure.append(tensor)\n\n def add_group(self, group: GroupStructure):\n self.structure.append(group)\n\n @property\n def groups(self):\n return [g for g in self.structure if isinstance(g, GroupStructure)]\n\n @property\n def tensors(self):\n return [t for t in self.structure if isinstance(t, TensorStructure)]\n\n @property\n def all_keys(self):\n keys = set([t.name for t in self.tensors])\n groups = self.groups\n\n if self.ignore_one_group and len(groups) == 1:\n keys.update([t.name for t in groups[0].tensors])\n return keys\n\n for group in groups:\n keys.update(group.all_keys)\n\n return keys\n\n def create_full(self, ds: Dataset):\n first_level_tensors = self.tensors\n groups = self.groups\n\n for tensor in first_level_tensors:\n tensor.create(ds)\n\n if self.ignore_one_group and len(groups) == 1:\n for tensor in groups[0].tensors:\n tensor.create(ds)\n return\n\n for group in groups:\n group.create(ds)\n\n def create_missing(self, ds: Dataset):\n first_level_tensors = self.tensors\n groups = self.groups\n\n for tensor in first_level_tensors:\n tensor.create_missing(ds)\n\n if self.ignore_one_group and len(groups) == 1:\n for tensor in groups[0].tensors:\n tensor.create_missing(ds)\n return\n\n for group in groups:\n 
group.create_missing(ds)\n","repo_name":"activeloopai/deeplake","sub_path":"deeplake/auto/unstructured/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","stars":7141,"dataset":"github-code","pt":"81"} +{"seq_id":"70706715144","text":"\nimport math\nimport time as tm\n\nfrom sage.crypto.sbox import SBox\n\nfrom claasp.cipher_modules.models.cp.cp_model import CpModel, solve_satisfy\nfrom claasp.name_mappings import (CONSTANT, INTERMEDIATE_OUTPUT, CIPHER_OUTPUT, SBOX, MIX_COLUMN, WORD_OPERATION,\n XOR_DIFFERENTIAL, LINEAR_LAYER)\n\n\ndef and_xor_differential_probability_ddt(numadd):\n \"\"\"\n Return the ddt of the and operation.\n\n INPUT:\n\n - ``numadd`` -- **integer**; the number of addenda\n\n EXAMPLES::\n\n sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (\n ....: and_xor_differential_probability_ddt)\n sage: from claasp.ciphers.block_ciphers.simon_block_cipher import SimonBlockCipher\n sage: simon = SimonBlockCipher()\n sage: and_xor_differential_probability_ddt(2)\n [4, 0, 2, 2, 2, 2, 2, 2]\n \"\"\"\n n = pow(2, numadd)\n ddt_table = []\n for i in range(n):\n for m in range(2):\n count = 0\n for j in range(n):\n k = i ^ j\n binary_j = format(j, f'0{numadd}b')\n result_j = 1\n binary_k = format(k, f'0{numadd}b')\n result_k = 1\n for index in range(numadd):\n result_j *= int(binary_j[index])\n result_k *= int(binary_k[index])\n difference = result_j ^ result_k\n if difference == m:\n count += 1\n ddt_table.append(count)\n\n return ddt_table\n\n\ndef update_and_or_ddt_valid_probabilities(and_already_added, component, cp_declarations, valid_probabilities):\n numadd = component.description[1]\n if numadd not in and_already_added:\n ddt_table = and_xor_differential_probability_ddt(numadd)\n dim_ddt = len([i for i in ddt_table if i])\n ddt_entries = []\n ddt_values = ''\n set_of_occurrences = set(ddt_table)\n set_of_occurrences -= {0}\n valid_probabilities.update({round(100 * math.log2(2 ** numadd / occurrence))\n for occurrence in set_of_occurrences})\n for i in range(pow(2, numadd + 1)):\n if ddt_table[i] != 0:\n binary_i = format(i, f'0{numadd + 1}b')\n ddt_entries += [f'{binary_i[j]}' for j in range(numadd + 1)]\n ddt_entries.append(str(round(100 * math.log2(pow(2, numadd) / ddt_table[i]))))\n ddt_values = ','.join(ddt_entries)\n and_declaration = f'array [1..{dim_ddt}, 1..{numadd + 2}] of int: ' \\\n f'and{numadd}inputs_DDT = array2d(1..{dim_ddt}, 1..{numadd + 2}, ' \\\n f'[{ddt_values}]);'\n cp_declarations.append(and_declaration)\n and_already_added.append(numadd)\n\n\nclass CpXorDifferentialTrailSearchModel(CpModel):\n\n def __init__(self, cipher):\n self._first_step = []\n self._first_step_find_all_solutions = []\n self._cp_xor_differential_constraints = []\n super().__init__(cipher)\n\n def build_xor_differential_trail_model(self, weight=-1, fixed_variables=[]):\n \"\"\"\n Build the CP model for the search of XOR differential trails.\n\n INPUT:\n\n - ``weight`` -- **integer** (default: `1`); a specific weight. 
If set to non-negative integer, fixes the XOR\n trail weight\n - ``fixed_variables`` -- **list** (default: `[]`); dictionaries containing the variables to be fixed in\n standard format\n\n EXAMPLES::\n\n sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (\n ....: CpXorDifferentialTrailSearchModel)\n sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher\n sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list\n sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)\n sage: cp = CpXorDifferentialTrailSearchModel(speck)\n sage: fixed_variables = [set_fixed_variables('key', 'equal', range(64),\n ....: integer_to_bit_list(0, 64, 'little'))]\n sage: fixed_variables.append(set_fixed_variables('plaintext', 'equal', range(32),\n ....: integer_to_bit_list(0, 32, 'little')))\n sage: cp.build_xor_differential_trail_model(-1, fixed_variables)\n \"\"\"\n self.initialise_model()\n self.c = 0\n self.sbox_mant = []\n self.input_sbox = []\n self.component_and_probability = {}\n self.table_of_solutions_length = 0\n self.build_xor_differential_trail_model_template(weight, fixed_variables)\n variables, constraints = self.input_xor_differential_constraints()\n self._model_prefix.extend(variables)\n self._variables_list.extend(constraints)\n self._model_constraints.extend(self.final_xor_differential_constraints(weight))\n self._model_constraints = self._model_prefix + self._variables_list + self._model_constraints\n\n def build_xor_differential_trail_model_template(self, weight, fixed_variables):\n variables = []\n self._variables_list = []\n constraints = self.fix_variables_value_constraints(fixed_variables)\n component_types = [CONSTANT, INTERMEDIATE_OUTPUT, CIPHER_OUTPUT, LINEAR_LAYER, SBOX, MIX_COLUMN, WORD_OPERATION]\n operation_types = ['AND', 'MODADD', 'MODSUB', 'NOT', 'OR', 'ROTATE', 'SHIFT', 'XOR']\n self._model_constraints = constraints\n\n for component in self._cipher.get_all_components():\n operation = component.description[0]\n if component.type not in component_types or (\n WORD_OPERATION == component.type and operation not in operation_types):\n print(f'{component.id} not yet implemented')\n else:\n variables, constraints = component.cp_xor_differential_propagation_constraints(self)\n\n self._variables_list.extend(variables)\n self._model_constraints.extend(constraints)\n\n if weight != -1:\n variables, constraints = self.weight_constraints(weight)\n self._variables_list.extend(variables)\n self._model_constraints.extend(constraints)\n\n def final_xor_differential_constraints(self, weight):\n \"\"\"\n Return a CP constraints list for the cipher outputs and solving indications for single or second step model.\n\n INPUT:\n\n - ``weight`` -- **integer**; a specific weight. 
If set to non-negative integer, fixes the XOR trail weight\n\n EXAMPLES::\n\n sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (\n ....: CpXorDifferentialTrailSearchModel)\n sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher\n sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list\n sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)\n sage: cp = CpXorDifferentialTrailSearchModel(speck)\n sage: fixed_variables = [set_fixed_variables('key', 'equal', range(64),\n ....: integer_to_bit_list(0, 64, 'little'))]\n sage: fixed_variables.append(set_fixed_variables('plaintext', 'equal', range(32),\n ....: integer_to_bit_list(0, 32, 'little')))\n sage: cp.build_xor_differential_trail_model(-1, fixed_variables)\n sage: cp.final_xor_differential_constraints(-1)[:-1]\n ['solve:: int_search(p, smallest, indomain_min, complete) minimize weight;']\n \"\"\"\n cipher_inputs = self._cipher.inputs\n cp_constraints = []\n if weight == -1 and self._probability:\n cp_constraints.append('solve:: int_search(p, smallest, indomain_min, complete) minimize weight;')\n else:\n cp_constraints.append(solve_satisfy)\n new_constraint = 'output['\n for element in cipher_inputs:\n new_constraint = new_constraint + f'\\\"{element} = \\\"++ show({element}) ++ \\\"\\\\n\\\" ++'\n for component in self._cipher.get_all_components():\n if SBOX in component.type:\n new_constraint = new_constraint + \\\n f'\\\"{component.id} = \\\"++ show({component.id})++ \\\"\\\\n\\\" ++ ' \\\n f'show(p[{self.component_and_probability[component.id]}]/100) ++ \\\"\\\\n\\\" ++'\n elif WORD_OPERATION in component.type:\n new_constraint = self.get_word_operation_xor_differential_constraints(component, new_constraint)\n else:\n new_constraint = new_constraint + f'\\\"{component.id} = \\\"++ ' \\\n f'show({component.id})++ \\\"\\\\n\\\" ++ \\\"0\\\" ++ \\\"\\\\n\\\" ++'\n new_constraint = new_constraint + '\\\"Trail weight = \\\" ++ show(weight)];'\n cp_constraints.append(new_constraint)\n\n return cp_constraints\n\n def find_all_xor_differential_trails_with_fixed_weight(self, fixed_weight, fixed_values=[], solver_name='Chuffed'):\n \"\"\"\n Return a list of solutions containing all the differential trails having the ``fixed_weight`` weight.\n\n INPUT:\n\n - ``fixed_weight`` -- **integer**; the weight to be fixed\n - ``fixed_values`` -- **list** (default: `[]`); can be created using ``set_fixed_variables`` method\n - ``solver_name`` -- **string** (default: `Chuffed`); the name of the solver. 
Available values are:\n\n * ``'Chuffed'``\n * ``'Gecode'``\n * ``'COIN-BC'``\n\n EXAMPLES::\n\n sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (\n ....: CpXorDifferentialTrailSearchModel)\n sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher\n sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list\n sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=2)\n sage: cp = CpXorDifferentialTrailSearchModel(speck)\n sage: fixed_values = []\n sage: fixed_values.append(set_fixed_variables('key', 'equal', list(range(16)),\n ....: integer_to_bit_list(0, 16, 'big')))\n sage: fixed_values.append(set_fixed_variables('plaintext', 'not_equal', list(range(8)),\n ....: integer_to_bit_list(0, 8, 'big')))\n sage: trails = cp.find_all_xor_differential_trails_with_fixed_weight(1, fixed_values, 'Chuffed') # long\n ...\n sage: len(trails) # long\n 6\n \"\"\"\n start = tm.time()\n self.build_xor_differential_trail_model(fixed_weight, fixed_values)\n end = tm.time()\n build_time = end - start\n solutions = self.solve(XOR_DIFFERENTIAL, solver_name)\n for solution in solutions:\n solution['building_time_seconds'] = build_time\n\n return solutions\n\n def find_all_xor_differential_trails_with_weight_at_most(self, min_weight, max_weight=64, fixed_values=[],\n solver_name='Chuffed'):\n \"\"\"\n Return a list of solutions containing all the differential trails.\n\n The differential trails having the weight of correlation lying in the interval ``[min_weight, max_weight]``.\n\n INPUT:\n\n - ``min_weight`` -- **integer**; the weight from which to start the search\n - ``max_weight`` -- **integer** (default: 64); the weight at which the search stops\n - ``fixed_values`` -- **list** (default: `[]`); can be created using ``set_fixed_variables`` method\n - ``solver_name`` -- **string** (default: `Chuffed`); the name of the solver. 
Available values are:\n\n * ``'Chuffed'``\n * ``'Gecode'``\n * ``'COIN-BC'``\n\n EXAMPLES::\n\n sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (\n ....: CpXorDifferentialTrailSearchModel)\n sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher\n sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list\n sage: speck = SpeckBlockCipher(block_bit_size=8, key_bit_size=16, number_of_rounds=2)\n sage: cp = CpXorDifferentialTrailSearchModel(speck)\n sage: fixed_values = []\n sage: fixed_values.append(set_fixed_variables('key', 'equal', list(range(16)),\n ....: integer_to_bit_list(0, 16, 'big')))\n sage: fixed_values.append(set_fixed_variables('plaintext', 'not_equal', list(range(8)),\n ....: integer_to_bit_list(0, 8, 'big')))\n sage: trails = cp.find_all_xor_differential_trails_with_weight_at_most(0,1, fixed_values, 'Chuffed')\n ...\n sage: len(trails) # long\n 7\n \"\"\"\n start = tm.time()\n self.build_xor_differential_trail_model(0, fixed_values)\n self._model_constraints.append(f'constraint weight >= {100 * min_weight} /\\\\ weight <= {100 * max_weight} ')\n end = tm.time()\n build_time = end - start\n solutions = self.solve(XOR_DIFFERENTIAL, solver_name)\n for solution in solutions:\n solution['building_time_seconds'] = build_time\n\n return solutions\n\n def find_differential_weight(self, fixed_values=[], solver_name='Chuffed'):\n probability = 0\n self.build_xor_differential_trail_model(-1, fixed_values)\n solutions = self.solve(XOR_DIFFERENTIAL, solver_name)\n if isinstance(solutions, list):\n for solution in solutions:\n weight = solution['total_weight']\n probability += 1 / 2 ** weight\n return math.log2(1 / probability)\n else:\n return solutions['total_weight']\n\n def find_lowest_weight_xor_differential_trail(self, fixed_values=[], solver_name='Chuffed'):\n \"\"\"\n Return the solution representing a differential trail with the lowest weight of correlation.\n\n .. NOTE::\n\n There could be more than one trail with the lowest weight. In order to find all the lowest weight\n trail, run :py:meth:`~SmtModel.find_all_xor_differential_trails_with_fixed_weight`.\n\n INPUT:\n\n - ``fixed_values`` -- **list** (default: `[]`); can be created using ``set_fixed_variables`` method\n - ``solver_name`` -- **string** (default: `Chuffed`); the name of the solver. 
Available values are:\n\n * ``'Chuffed'``\n * ``'Gecode'``\n * ``'COIN-BC'``\n\n EXAMPLES::\n\n sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (\n ....: CpXorDifferentialTrailSearchModel)\n sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher\n sage: from claasp.cipher_modules.models.utils import set_fixed_variables, integer_to_bit_list\n sage: speck = SpeckBlockCipher(number_of_rounds=5)\n sage: cp = CpXorDifferentialTrailSearchModel(speck)\n sage: fixed_values = []\n sage: fixed_values.append(set_fixed_variables('key', 'equal', list(range(64)),\n ....: integer_to_bit_list(0, 64, 'big')))\n sage: fixed_values.append(set_fixed_variables('plaintext', 'not_equal', list(range(32)),\n ....: integer_to_bit_list(0, 32, 'big')))\n sage: cp.find_lowest_weight_xor_differential_trail(fixed_values,'Chuffed') # random\n {'building_time': 0.007165431976318359,\n 'cipher_id': 'speck_p32_k64_o32_r4',\n 'components_values': {'cipher_output_4_12': {'value': '850a9520',\n 'weight': 0},\n ...\n 'total_weight': '9.0'}\n \"\"\"\n start = tm.time()\n self.build_xor_differential_trail_model(-1, fixed_values)\n end = tm.time()\n build_time = end - start\n solution = self.solve('xor_differential_one_solution', solver_name)\n solution['building_time_seconds'] = build_time\n\n return solution\n\n def find_one_xor_differential_trail(self, fixed_values=[], solver_name='Chuffed'):\n \"\"\"\n Return the solution representing a differential trail with any weight.\n\n INPUT:\n\n - ``fixed_values`` -- **list** (default: `[]`); can be created using ``set_fixed_variables`` method\n - ``solver_name`` -- **string** (default: `Chuffed`); the name of the solver. Available values are:\n\n * ``'Chuffed'``\n * ``'Gecode'``\n * ``'COIN-BC'``\n\n EXAMPLES::\n\n sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (\n ....: CpXorDifferentialTrailSearchModel)\n sage: from claasp.cipher_modules.models.utils import set_fixed_variables\n sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher\n sage: speck = SpeckBlockCipher(number_of_rounds=2)\n sage: cp = CpXorDifferentialTrailSearchModel(speck)\n sage: plaintext = set_fixed_variables(\n ....: component_id='plaintext',\n ....: constraint_type='not_equal',\n ....: bit_positions=range(32),\n ....: bit_values=[0]*32)\n sage: cp.find_one_xor_differential_trail([plaintext], 'Chuffed') # random\n {'cipher_id': 'speck_p32_k64_o32_r2',\n 'model_type': 'xor_differential_one_solution',\n ...\n 'cipher_output_1_12': {'value': 'ffff0000', 'weight': 0}},\n 'total_weight': '18.0'}\n \"\"\"\n start = tm.time()\n self.build_xor_differential_trail_model(0, fixed_values)\n end = tm.time()\n build_time = end - start\n solution = self.solve('xor_differential_one_solution', solver_name)\n solution['building_time_seconds'] = build_time\n\n return solution\n\n def find_one_xor_differential_trail_with_fixed_weight(self, fixed_weight=-1, fixed_values=[],\n solver_name='Chuffed'):\n \"\"\"\n Return the solution representing a differential trail with the weight of correlation equal to ``fixed_weight``.\n\n INPUT:\n\n - ``fixed_weight`` -- **integer**; the value to which the weight is fixed, if non-negative\n - ``fixed_values`` -- **list** (default: `[]`); can be created using ``set_fixed_variables`` method\n - ``solver_name`` -- **string** (default: `Chuffed`); the name of the solver. 
Available values are:\n\n * ``'Chuffed'``\n * ``'Gecode'``\n * ``'COIN-BC'``\n\n EXAMPLES::\n\n sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (\n ....: CpXorDifferentialTrailSearchModel)\n sage: from claasp.cipher_modules.models.utils import set_fixed_variables\n sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher\n sage: speck = SpeckBlockCipher(number_of_rounds=5)\n sage: cp = CpXorDifferentialTrailSearchModel(speck)\n sage: plaintext = set_fixed_variables(\n ....: component_id='plaintext',\n ....: constraint_type='not_equal',\n ....: bit_positions=range(32),\n ....: bit_values=[0]*32)\n sage: cp.find_one_xor_differential_trail_with_fixed_weight(9, [plaintext], 'Chuffed') # random\n {'cipher_id': 'speck_p32_k64_o32_r5',\n 'model_type': 'xor_differential_one_solution',\n ...\n 'total_weight': '9.0',\n 'building_time_seconds': 0.0013153553009033203}\n \"\"\"\n start = tm.time()\n self.build_xor_differential_trail_model(fixed_weight, fixed_values)\n end = tm.time()\n build_time = end - start\n solution = self.solve('xor_differential_one_solution', solver_name)\n solution['building_time_seconds'] = build_time\n\n return solution\n\n def get_word_operation_xor_differential_constraints(self, component, new_constraint):\n if 'AND' in component.description[0] or 'MODADD' in component.description[0]:\n new_constraint = new_constraint + f'\\\"{component.id} = \\\"++ show({component.id})++ \\\"\\\\n\\\" ++ show('\n for i in range(len(self.component_and_probability[component.id])):\n new_constraint = new_constraint + f'p[{self.component_and_probability[component.id][i]}]/100+'\n new_constraint = new_constraint[:-1] + ') ++ \\\"\\\\n\\\" ++'\n else:\n new_constraint = new_constraint + f'\\\"{component.id} = \\\"++ ' \\\n f'show({component.id})++ \\\"\\\\n\\\" ++ \\\"0\\\" ++ \\\"\\\\n\\\" ++'\n\n return new_constraint\n\n def input_xor_differential_constraints(self):\n \"\"\"\n Return a list of CP declarations and a list of Cp constraints for the first part of the xor differential model.\n\n INPUT:\n\n - None\n\n EXAMPLES::\n\n sage: from claasp.ciphers.block_ciphers.speck_block_cipher import SpeckBlockCipher\n sage: from claasp.cipher_modules.models.cp.cp_models.cp_xor_differential_trail_search_model import (\n ....: CpXorDifferentialTrailSearchModel)\n sage: speck = SpeckBlockCipher(block_bit_size=32, key_bit_size=64, number_of_rounds=4)\n sage: cp = CpXorDifferentialTrailSearchModel(speck)\n sage: cp.input_xor_differential_constraints()\n (['array[0..31] of var 0..1: plaintext;',\n 'array[0..63] of var 0..1: key;',\n ...\n 'array[0..31] of var 0..1: cipher_output_3_12;',\n 'array[0..6] of var {0, 900, 200, 1100, 400, 1300, 600, 1500, 800, 100, 1000, 300, 1200, 500, 1400, 700}: p;',\n 'var int: weight = sum(p);'],\n [])\n \"\"\"\n self._cp_xor_differential_constraints = [f'array[0..{bit_size - 1}] of var 0..1: {input_};'\n for input_, bit_size in zip(self._cipher.inputs, self._cipher.inputs_bit_size)]\n self.sbox_mant = []\n prob_count = 0\n valid_probabilities = {0}\n and_already_added = []\n for component in self._cipher.get_all_components():\n if CONSTANT not in component.type:\n output_id_link = component.id\n self._cp_xor_differential_constraints.append(f'array[0..{int(component.output_bit_size) - 1}] of var 0..1: {output_id_link};')\n if SBOX in component.type:\n prob_count += 1\n self.update_sbox_ddt_valid_probabilities(component, valid_probabilities)\n elif WORD_OPERATION in component.type:\n if 'AND' in 
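The `get_word_operation_xor_differential_constraints` helper above assembles a MiniZinc output item by Python string concatenation, so the escape characters are doing double duty. What one such fragment actually evaluates to, for a hypothetical component id:

```python
component_id = "modadd_0_1"  # hypothetical component id
item = f'\"{component_id} = \"++ show({component_id})++ \"\\n\" ++'
print(item)
# "modadd_0_1 = "++ show(modadd_0_1)++ "\n" ++
```

The printed text is the MiniZinc expression: the Python `\\n` survives as a literal `\n` inside the MiniZinc string, which the solver later expands to a newline when it prints the solution.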
component.description[0] or component.description[0] == 'OR':\n prob_count += component.description[1] * component.output_bit_size\n update_and_or_ddt_valid_probabilities(and_already_added, component, self._cp_xor_differential_constraints,\n valid_probabilities)\n elif 'MODADD' in component.description[0]:\n prob_count += component.description[1] - 1\n output_size = component.output_bit_size\n valid_probabilities |= set(range(100 * output_size)[::100])\n cp_declarations_weight = 'int: weight = 0;'\n if prob_count > 0:\n self._probability = True\n new_declaration = f'array[0..{prob_count - 1}] of var {valid_probabilities}: p;'\n self._cp_xor_differential_constraints.append(new_declaration)\n cp_declarations_weight = 'var int: weight = sum(p);'\n self._cp_xor_differential_constraints.append(cp_declarations_weight)\n cp_constraints = []\n\n return self._cp_xor_differential_constraints, cp_constraints\n\n def update_sbox_ddt_valid_probabilities(self, component, valid_probabilities):\n input_size = int(component.input_bit_size)\n output_id_link = component.id\n description = component.description\n sbox = SBox(description)\n sbox_already_in = False\n for mant in self.sbox_mant:\n if description == mant[0]:\n sbox_already_in = True\n if not sbox_already_in:\n sbox_ddt = sbox.difference_distribution_table()\n for i in range(sbox_ddt.nrows()):\n set_of_occurrences = set(sbox_ddt.rows()[i])\n set_of_occurrences -= {0}\n valid_probabilities.update({round(100 * math.log2(2 ** input_size / occurrence))\n for occurrence in set_of_occurrences})\n self.sbox_mant.append((description, output_id_link))\n","repo_name":"Crypto-TII/claasp","sub_path":"claasp/cipher_modules/models/cp/cp_models/cp_xor_differential_trail_search_model.py","file_name":"cp_xor_differential_trail_search_model.py","file_ext":"py","file_size_in_byte":25125,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"23125267834","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport sys\nsys.path.append('.')\n\nfrom example.issues import print_cchar, print_char\nfrom example.issues import DispatchIssue, dispatch_issue_go\nfrom example.issues import Placeholder, return_vec_of_reference_wrapper\nfrom example.issues import iterator_passthrough\nfrom example.issues import ElementList, ElementA, print_element\nfrom example.issues import expect_float, expect_int\nfrom example.issues import A, call_f\nfrom example.issues import StrIssue\nfrom example.issues import NestA, NestB, NestC, print_NestA, print_NestB, print_NestC\nimport gc\n\nprint_cchar(\"const char *\")\nprint_char('c')\n\n\nclass PyClass1(DispatchIssue):\n def dispatch(self):\n print(\"Yay..\")\n\n\nclass PyClass2(DispatchIssue):\n def dispatch(self):\n try:\n super(PyClass2, self).dispatch()\n except Exception as e:\n print(\"Failed as expected: \" + str(e))\n p = PyClass1()\n dispatch_issue_go(p)\n\nb = PyClass2()\ndispatch_issue_go(b)\n\nprint(return_vec_of_reference_wrapper(Placeholder(4)))\n\nprint(list(iterator_passthrough(iter([3, 5, 7, 9, 11, 13, 15]))))\n\nel = ElementList()\nfor i in range(10):\n el.add(ElementA(i))\ngc.collect()\nfor i, v in enumerate(el.get()):\n print(\"%i==%i, \" % (i, v.value()), end='')\nprint()\n\ntry:\n print_element(None)\nexcept Exception as e:\n print(\"Failed as expected: \" + str(e))\n\ntry:\n print(expect_int(5.2))\nexcept Exception as e:\n print(\"Failed as expected: \" + str(e))\n\nprint(expect_float(12))\n\nclass B(A):\n def __init__(self):\n super(B, 
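The `update_sbox_ddt_valid_probabilities` method at the end of the claasp record above turns S-box DDT occurrence counts into the integer weights the CP model works with (fixed point, scaled by 100). A standalone sketch of that conversion on a hypothetical 4-bit DDT row:

```python
import math

def ddt_row_to_weights(ddt_row, input_bits):
    # Drop impossible transitions (count 0), then map each count c to
    # round(100 * log2(2**n / c)) -- the fixed-point weight scheme
    # used by the CP model above.
    occurrences = {c for c in ddt_row if c != 0}
    return {round(100 * math.log2(2 ** input_bits / c)) for c in occurrences}

# Hypothetical DDT row of a 4-bit S-box (counts sum to 2**4):
print(ddt_row_to_weights([0, 4, 4, 8], 4))  # {200, 100}
```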
self).__init__()\n\n def f(self):\n print(\"In python f()\")\n\nprint(\"C++ version\")\na = A()\ncall_f(a)\n\nprint(\"Python version\")\nb = B()\ncall_f(b)\n\nprint(StrIssue(3))\ntry:\n print(StrIssue(\"no\", \"such\", \"constructor\"))\nexcept TypeError as e:\n print(\"Failed as expected: \" + str(e))\n\na = NestA()\nb = NestB()\nc = NestC()\na += 10\nb.a += 100\nc.b.a += 1000\nb -= 1\nc.b -= 3\nc *= 7\nprint_NestA(a)\nprint_NestA(b.a)\nprint_NestA(c.b.a)\nprint_NestB(b)\nprint_NestB(c.b)\nprint_NestC(c)\nabase = a.as_base()\nprint(abase.value)\na.as_base().value += 44\nprint(abase.value)\nprint(c.b.a.as_base().value)\nc.b.a.as_base().value += 44\nprint(c.b.a.as_base().value)\ndel c\ngc.collect()\ndel a # Should't delete while abase is still alive\ngc.collect()\nprint(abase.value)\ndel abase\ngc.collect()\n","repo_name":"sangrey/pybind11","sub_path":"example/issues.py","file_name":"issues.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"31743965791","text":"import pathlib\n\nimport numpy as np\nimport pandas as pd\n\nimport plotly.graph_objects as go\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nfrom app import app\nfrom .constants import CATEGORY_COLORS\n\nPATH = pathlib.Path(__file__).parent\nDATA_PATH = PATH.joinpath(\"../datasets\").resolve()\n\nus_data = pd.read_csv(DATA_PATH.joinpath(\"USdata.csv\"), parse_dates=['trending_date', 'publish_time'])\n\ndef display_boxplot_number_trend():\n trend_count = us_data.groupby(['title', 'category_name']).size()\n trend_count = trend_count.reset_index(name='trend_count')\n trend_count = trend_count[trend_count['trend_count'] > 0]\n\n categories = sorted(trend_count['category_name'].unique())\n # Each box is represented by a dict that contains the data, the type, and the colour.\n # Use list comprehension to describe N boxes, each with a different colour and with different randomly generated data:\n fig = go.Figure(data=[go.Box(\n y=trend_count[trend_count['category_name'] == categories[i]]['trend_count'],\n marker_color=CATEGORY_COLORS[i],\n name= categories[i],\n ) for i in range(0, len(categories))])\n\n # format the layout\n fig.update_layout(\n title='Número de vídeos que entraram em trend mais de uma vez',\n xaxis=dict(title='Categorias', showgrid=False, zeroline=False, showticklabels=False),\n yaxis=dict(title='Quantidade de dias em trend', zeroline=False, gridcolor='white'),\n plot_bgcolor='rgb(233,233,233)',\n width=1200,\n height=600\n )\n\n return fig\n\ndef display_boxplot_max_trend_window():\n def biggest_window_trend_time(df):\n max_window = 1\n diff1 = df['trending_date'].diff() == pd.Timedelta('1D')\n \n counter = 0\n for value in diff1:\n counter = counter + 1 if value else 1\n max_window = max(max_window, counter)\n \n return max_window\n\n max_trend_window = us_data.groupby(['title', 'category_name']).apply(biggest_window_trend_time)\n max_trend_window = max_trend_window.to_frame('max_trend_window')\n max_trend_window = max_trend_window.reset_index()\n\n categories = sorted(max_trend_window['category_name'].unique())\n # Each box is represented by a dict that contains the data, the type, and the colour.\n # Use list comprehension to describe N boxes, each with a different colour and with different randomly generated data:\n fig = go.Figure(data=[go.Box(\n y=max_trend_window[max_trend_window['category_name'] == categories[i]]['max_trend_window'],\n marker_color=CATEGORY_COLORS[i],\n name= 
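`biggest_window_trend_time` in the dash record above finds the longest run of consecutive trending days via `diff() == Timedelta('1D')`. The same idea as a standalone function, with hypothetical dates:

```python
import pandas as pd

def longest_daily_streak(dates):
    # Longest run of consecutive calendar days: a True in `diffs`
    # extends the current streak, anything else restarts it at 1.
    diffs = pd.Series(sorted(dates)).diff() == pd.Timedelta("1D")
    longest = counter = 1
    for is_consecutive in diffs[1:]:
        counter = counter + 1 if is_consecutive else 1
        longest = max(longest, counter)
    return longest

dates = pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03", "2020-01-05"])
print(longest_daily_streak(dates))  # 3
```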
categories[i],\n ) for i in range(0, len(categories))])\n\n # format the layout\n fig.update_layout(\n title='Maior janela de trend para os vídeos de cada categoria',\n xaxis=dict(title='Categorias', showgrid=False, zeroline=False, showticklabels=False),\n yaxis=dict(title='Tamanho da janela máxima (em dias)', zeroline=False, gridcolor='white'),\n plot_bgcolor='rgb(233,233,233)',\n width=1200,\n height=600\n )\n\n return fig\n\ndef display_boxplot_min_time_to_trend():\n def get_trend_start_time(df):\n return (df['trending_date'].iloc[0] - df['publish_time'].iloc[0]).days\n\n trend_start_time = us_data.groupby(['title', 'publish_time']).apply(get_trend_start_time)\n trend_start_time = trend_start_time.to_frame('trend_start_time')\n trend_start_time = trend_start_time.reset_index()\n trend_start_time = trend_start_time.drop('publish_time', axis=1)\n\n trend_start_time = trend_start_time.merge(us_data[['title', 'category_name']])\n trend_start_time = trend_start_time.drop_duplicates()\n\n categories = sorted(trend_start_time['category_name'].unique())\n # Each box is represented by a dict that contains the data, the type, and the colour.\n # Use list comprehension to describe N boxes, each with a different colour and with different randomly generated data:\n fig = go.Figure(data=[go.Box(\n y=trend_start_time[trend_start_time['category_name'] == categories[i]]['trend_start_time'],\n marker_color=CATEGORY_COLORS[i],\n name= categories[i],\n ) for i in range(0, len(categories))])\n\n # format the layout\n fig.update_layout(\n title='Tempo necessário em dias para o vídeo entrar em trend',\n xaxis=dict(title='Categorias', showgrid=False, zeroline=False, showticklabels=False),\n yaxis=dict(title='Tempo necessário (em dias)', zeroline=False, gridcolor='white'),\n plot_bgcolor='rgb(233,233,233)',\n width=1200,\n height=600\n )\n\n return fig\n\nlayout = html.Div([\n html.H3('Análises de Tempo de Trending (Em Alta)'),\n html.P('Coloque aqui uma breve descrição da visualização, como interagir e insights.'),\n html.Div([\n html.Img(src=app.get_asset_url('trend_count.png'), style={\"width\": \"50%\"}),\n dcc.Graph(id='boxplot-trend-count', figure=display_boxplot_number_trend()),\n html.Img(src=app.get_asset_url('max_trend_window.png'), style={\"width\": \"50%\"}),\n dcc.Graph(id='boxplot-max-trend-window', figure=display_boxplot_max_trend_window()),\n html.Img(src=app.get_asset_url('trend_start_time.png'), style={\"width\": \"50%\"}),\n dcc.Graph(id='boxplot-min-time-to-trend', figure=display_boxplot_min_time_to_trend())\n ], style={\"display\": \"flex\", \"flex-direction\": \"column\", \"align-items\": \"center\"})\n])\n\n# layout = html.Div([\n# html.H3('Análises de Tempo de Trending (Em Alta)'),\n# html.P('Coloque aqui uma breve descrição da visualização, como interagir e insights.'),\n# html.Div([\n# html.Div([\n# html.Img(src=app.get_asset_url('trend_count.png'), style={\"width\": \"45%\"}),\n# html.Img(src=app.get_asset_url('max_trend_window.png'), style={\"width\": \"45%\"}),\n# ], style={\"display\": \"flex\", \"justify-content\": \"space-around\"}),\n# html.Img(src=app.get_asset_url('trend_start_time.png'), style={\"width\": \"50%\"}),\n# ],\n# style={\"display\": \"flex\", \"flex-direction\": \"column\", \"align-items\": \"center\"}\n# )\n# 
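`display_boxplot_min_time_to_trend` above computes, per title, the days between publication and the first trending date with a groupby/apply. A minimal sketch on a hypothetical frame (newer pandas versions may emit an `include_groups` deprecation warning for this apply style, but the result is the same):

```python
import pandas as pd

df = pd.DataFrame({
    "title": ["a", "a", "b"],
    "publish_time": pd.to_datetime(["2020-01-01", "2020-01-01", "2020-01-03"]),
    "trending_date": pd.to_datetime(["2020-01-02", "2020-01-04", "2020-01-08"]),
})

# First trending date minus publish date, in whole days, per title:
start = df.groupby("title").apply(
    lambda g: (g["trending_date"].iloc[0] - g["publish_time"].iloc[0]).days
)
print(start.to_dict())  # {'a': 1, 'b': 5}
```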
])","repo_name":"ThiagoPoppe/youtube-visualization","sub_path":"dash-visualization/apps/tempo_de_trending.py","file_name":"tempo_de_trending.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14399319319","text":"import os\nimport json\n\n\ndef form_new(list):\n old_list = list\n new_values = []\n for i in list:\n new_values.append([i[1]])\n sorted_values = sorted(new_values)\n i2 = 0\n while len(old_list) != 0:\n i = 0\n i2 += 1\n while i <= len(sorted_values) - 1:\n if old_list[0][1] == sorted_values[i][0]:\n sorted_values[i].insert(0, old_list[0][0])\n old_list.pop(0)\n break\n i += 1\n return sorted_values\n\n\ndef main_stats(message):\n f_ = open(\"server/plusplus/\" + str(message.server.id) + \".json\", \"r\")\n f = json.loads(f_.read())\n finalString = 'Name' + \" \" * 14 + \"Credits\" + \"\\n\\n\"\n n = form_new(f)\n n.reverse()\n num = 0\n for i in n:\n num += 1\n multi = 18 - len(i[0])\n finalString += str(num) + \". \" + i[0] + \" \" * multi + str(i[1]) + \"\\n\"\n f_.close()\n return [[\"text\", \"```\" + finalString + \"```\"]]\n\n\ndef main_alter(message):\n solved = False\n f = open(\"server/plusplus/\" + str(message.server.id) + \".json\", \"r\")\n s = json.loads(f.read())\n f.close()\n f = open(\"server/plusplus/\" + str(message.server.id) + \".json\", \"w\")\n x = list(message.content)\n x.pop(0)\n x.pop(0)\n x.pop(0)\n y = ''.join(x)\n if message.author.name == y:\n f.write(json.dumps(s))\n f.close()\n return [[\"text\", \"```Nice try \" + y.lower() + \"!```\"]]\n elif message.server.get_member_named(y) == None:\n f.write(json.dumps(s))\n f.close()\n return [[\"text\", \"```Looks like \" + y.lower() + \" is not a member of the server :/```\"]]\n else:\n i = 0\n while i <= len(s) - 1:\n try:\n if y in s[i]:\n if message.content[1] == \"+\":\n s[i][1] += 1\n f.write(json.dumps(s))\n f.close()\n return [[\"text\", \"```\" + y + \" just earned a point!```\"]]\n elif message.content[1] == \"-\":\n s[i][1] -= 1\n f.write(json.dumps(s))\n f.close()\n return [[\"text\", \"```\" + y + \" just lost a point.```\"]]\n solved = True\n break\n except TypeError:\n pass\n i += 1\n if not solved:\n if message.content[1] == \"+\":\n s.append([y, 1])\n f.write(json.dumps(s))\n f.close()\n return [[\"text\", \"```\" + y + \" just earned a point!```\"]]\n elif message.content[1] == \"-\":\n s.append([y, -1])\n f.write(json.dumps(s))\n f.close()\n return [[\"text\", \"```\" + y + \" just lost a point.```\"]]\n","repo_name":"znthlabs/ubi","sub_path":"src/lib/modules/plusplus.py","file_name":"plusplus.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30397526729","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport telebot\nfrom telebot import types\n\nTOKEN = '5911937230:AAG-YqJGOYGS-dWcxome'\nbot = telebot.TeleBot(TOKEN)\n\nreq = requests.get('https://cbr.ru/')\nsoup = BeautifulSoup(req.text, 'html.parser')\nallNews = soup.findAll('div', class_='main-indicator_rates-table')\ndata = soup.findAll('div', class_='col-md-2 col-xs-9 _right mono-num')\nusd, eur, cny = [\n float(re.search(r'(\\d+,?\\d*)\\s*₽', str(data[i * 2])).group(1).replace(',', '.'))\n for i in range(3)\n]\n\n\n@bot.message_handler(content_types=['text'])\ndef get_text_messages(message):\n if message.text == \"/start\":\n\n keyboard = types.InlineKeyboardMarkup()\n key_yes = 
types.InlineKeyboardButton(text='Доллар', callback_data='usd')\n keyboard.add(key_yes)\n key_no = types.InlineKeyboardButton(text='Евро', callback_data='eur')\n keyboard.add(key_no)\n key_no = types.InlineKeyboardButton(text='Юань', callback_data='cny')\n keyboard.add(key_no)\n bot.send_message(message.from_user.id,\n \"Вас приветствует бот, который поможет вам в конвертации валют!\\nВыберите валюту!\",\n reply_markup=keyboard)\n else:\n bot.send_message(message.from_user.id, \"Я тебя не понимаю. Напиши /start.\")\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_worker(call):\n if call.data not in ('usd', 'eur', 'cny'):\n bot.send_message(call.message.chat.id, 'Не удалось обработать валюту!')\n req = requests.get('https://cbr.ru/')\n soup = BeautifulSoup(req.text, \"html.parser\")\n data = soup.findAll('div', class_='col-md-2 col-xs-9 _right mono-num')\n if len(data) != 6:\n bot.send_message(call.message.chat.id, 'Не удалось обработать команду!')\n helper = {'usd': [0, \"доллар\", 1], 'eur': [2, \"евро\", 1], 'cny': [4, \"юань\", 1]}[call.data]\n info = str(data[helper[0]])\n\n value = float(re.search(r'(\\d+,?\\d*)\\s*₽', info).group(1).replace(',', '.')) / helper[2]\n bot.send_message(call.message.chat.id, f'Сегодня один {helper[1]} стоит {value} рублей.')\n\n\nbot.polling(none_stop=True, interval=0)\n","repo_name":"Mannay12/converter_tg_bot_","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4625225925","text":"\"\"\"Class for lightcurves.\"\"\"\n\nimport logging\nimport itertools\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astroML import time_series\n\nfrom k2spin.config import *\nfrom k2spin import utils\nfrom k2spin import clean\nfrom k2spin import detrend\nfrom k2spin import plot\nfrom k2spin import prot\nfrom k2spin import evaluate\n\nclass LightCurve(object):\n \"\"\"\n Class to contain lightcurves and run analysis. 
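The converter bot above scrapes cbr.ru and pulls each rate out of the page with `re.search(r'(\d+,?\d*)\s*₽', ...)`, converting the Russian comma decimal to a float. A self-contained sketch of that parsing step on a hypothetical HTML fragment:

```python
import re

html_fragment = '<div class="mono-num">92,58 ₽</div>'  # hypothetical markup
match = re.search(r'(\d+,?\d*)\s*₽', html_fragment)
# The page uses a decimal comma, so swap it for a dot before float():
rate = float(match.group(1).replace(',', '.'))
print(rate)  # 92.58
```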
\n\n \"\"\"\n\n def __init__(self, time, flux, unc_flux, x_pos, y_pos, name, \n power_threshold=0.5, detrend_kwargs=None, to_plot=False):\n \"\"\"Clean up the input data and sigma-clip it.\n\n input\n -----\n time, flux, unc_flux: array-like\n the lightcurve\n\n x_pos, y_pos: array-like\n centroid pixel positions\n\n name: string\n\n power_threshold: float (should remove...)\n\n detrend_kwargs: dict\n kind: string (default supersmoother)\n \"supersmoother\",\"boxcar\", or \"linear\" \n phaser: float, optional\n alpha, half-width of smoothing window, or None (respectively)\n\n \"\"\"\n # Save the power threshold for later use\n self.power_threshold = power_threshold\n self.name = name\n\n logging.debug(self.name)\n logging.debug(\"Threshold %f\",self.power_threshold)\n\n # Clean up the input lightcurve\n cleaned_out = clean.prep_lc(time, flux, unc_flux, clip_at=3.)\n self.time, self.flux, self.unc_flux = cleaned_out[:3]\n self.med, self.stdev, all_kept = cleaned_out[3:]\n self.x_pos, self.y_pos = x_pos[all_kept], y_pos[all_kept]\n logging.debug(\"len init t %d f %d u %d\", len(self.time), \n len(self.flux),len(self.unc_flux))\n\n # Detrend the raw flux\n self._bulk_detrend(detrend_kwargs, to_plot)\n\n def choose_initial(self, to_plot=False):\n \"\"\"Search raw and detrended LCs for periods, and decide whether there's\n a period there.\n\n \"\"\"\n # Run a fit on the raw lc\n r_out = self._run_fit(\"raw\")\n raw_fp, raw_power, raw_prots, raw_pgram, raw_alias, raw_sigma = r_out\n logging.debug(\"Ran raw fit\")\n\n # Run a fit on the detrended lc\n d_out = self._run_fit(\"detrended\")\n det_fp, det_power, det_prots, det_pgram, det_alias, det_sigma = d_out\n logging.debug(\"Ran detrended fit\")\n\n # Only consider peaks less than ~half the length of the lightcurve\n# max_peak_loc = 0.75 * (self.time[-1] - self.time[0])\n max_peak_loc = 40\n logging.info(\"Max Prot = %f\", max_peak_loc)\n\n raw_loc2 = np.argmax(raw_pgram[raw_prots (2 * min(periods)):\n# logging.debug(\"fund P %f half P %f\", fund_prot, max(periods))\n half_fund = fund_prot / 2.0\n half_width = fund_prot * 0.01\n half_region = np.where(abs(half_fund - periods) < half_width)[0]\n half_peak = np.argmax(powers[half_region])\n half_per = periods[half_region][half_peak]\n half_pow = powers[half_region][half_peak]\n else:\n half_per, half_pow = np.nan, np.nan\n\n if fund_prot < (0.5 * max(periods)):\n# logging.debug(\"fund P %f max P %f\", fund_prot, max(periods))\n twice_fund = fund_prot * 2.0\n twice_width = twice_fund * 0.01\n twice_region = np.where(abs(twice_fund - periods) < twice_width)[0]\n twice_peak = np.argmax(powers[twice_region])\n twice_per = periods[twice_region][twice_peak]\n twice_pow = powers[twice_region][twice_peak]\n else:\n twice_per, twice_pow = np.nan, np.nan\n\n return half_per, half_pow, twice_per, twice_pow\n\n def _pick_lc(self, fund_power1, fund_power2):\n \"\"\"Pick the raw or detrended lc to continue with by \n selecting the one with the highest peak in the periodogram\n (no consideration of the *locations* of those peaks)\n \"\"\"\n # return a integer indicating which lightcurve to use (1 or 2)\n to_use = 0\n\n if fund_power1 > fund_power2:\n to_use = 1\n elif fund_power2 > fund_power1:\n to_use = 2\n else: # Either something's gone wrong, or they're exactly equal\n to_use = 0\n\n return to_use\n\n def _clean_it(self, use_lc, prot_lims=[0.1,70]):\n \"\"\"Clean all periodic signals from the lightcurve.\"\"\"\n if use_lc==\"raw\":\n logging.debug(\"fitting raw lc\")\n tt, ff, uu = self.time, self.flux, 
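`choose_initial` above guards against period aliases by searching the periodogram for peaks near half and twice the fundamental period, inside a window one percent of the fundamental wide. A standalone sketch of the half-period check with a synthetic periodogram (a Gaussian bump planted at the half-period):

```python
import numpy as np

periods = np.linspace(0.1, 40.0, 5000)
powers = np.exp(-0.5 * ((periods - 5.0) / 0.05) ** 2)  # fake peak at P = 5
fund_prot = 10.0                                       # fundamental period

# Search a narrow window around fund_prot / 2 for the strongest peak,
# mirroring the half_fund / half_width logic above:
half_fund = fund_prot / 2.0
half_width = fund_prot * 0.01
region = np.where(abs(half_fund - periods) < half_width)[0]
peak = np.argmax(powers[region])
print(round(periods[region][peak], 2))  # ~5.0, the half-period alias
```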
self.unc_flux\n elif (use_lc==\"detrended\") or (use_lc==\"det\"):\n logging.debug(\"fitting detrended lc\")\n tt, ff, uu = self.time, self.det_flux, self.det_unc\n else:\n logging.debug(\"fitting other lc\")\n tt, ff, uu = use_lc\n\n logging.debug(\"_run_fit threshold %f\", self.power_threshold)\n\n # Iteratively smooth, clip, and run a periodogram (period_cleaner)\n dk = {\"filename\":\"{0}plot_outputs/{1}_cleaning\".format(base_path, \n self.name)}\n pc_out = prot.detrend_for_correction(tt, ff, uu,\n prot_lims=prot_lims,\n to_plot=False, \n detrend_kwargs=dk)\n cl_flux, cl_unc = pc_out\n\n return cl_flux, cl_unc\n\n def _xy_correct(self, correct_with=None, n_closest=21):\n \"\"\"Correct for positional variations in the lightcurve once selected.\"\"\"\n \n # Loop through the lightcurve and find the n closest pixels.\n # Then divide by the median flux from those pixels\n\n num_pts = len(self.use_flux)\n logging.debug(\"Pixel position correction %d\", num_pts)\n\n if correct_with is None:\n correct_with = self.use_flux\n\n self.corrected_flux = np.zeros(num_pts)\n self.corrected_unc = np.zeros(num_pts)\n self.median_flux = np.zeros(num_pts)\n\n first_half = self.time<=2264\n x_pos1 = self.x_pos[first_half==True] \n y_pos1 = self.y_pos[first_half==True]\n x_pos2 = self.x_pos[first_half==False]\n y_pos2 = self.y_pos[first_half==False]\n\n for i, fval, xx, yy in itertools.izip(range(num_pts), self.use_flux,\n self.x_pos, self.y_pos):\n logging.debug(i)\n logging.debug(first_half[i])\n if first_half[i]:\n comp_x, comp_y = x_pos1, y_pos1\n comp_f = correct_with[first_half==True]\n else:\n comp_x, comp_y = x_pos2, y_pos2\n comp_f = correct_with[first_half==False]\n\n# comp_x, comp_y = self.x_pos, self.y_pos\n# comp_f = self.use_flux\n\n logging.debug(n_closest)\n pix_sep = np.sqrt((xx - comp_x)**2 + (yy - comp_y)**2)\n min_ind = np.argpartition(pix_sep, n_closest)[:n_closest]\n logging.debug(min_ind)\n logging.debug(np.median(pix_sep[min_ind]))\n\n median_nearest = np.median(comp_f[min_ind])\n #logging.debug(\"This flux %f Median Nearest %f\", \n # fval, median_nearest)\n self.median_flux[i] = median_nearest\n self.corrected_flux[i] = fval / median_nearest\n self.corrected_unc[i] = self.use_unc[i] / median_nearest\n\n logging.debug(\"Correction completed\")\n\n def _plot_xy(self):\n \"\"\"Plot some basic informational plots:\n Flux as a function of X-Y position\n Flux as a function of time\n \"\"\"\n pass\n\n def multi_search(self, to_plot=False):\n \"\"\"Search a lightcurve for a secondary signal.\"\"\"\n # Start with the corrected lightcurve and its associated period\n # Phase on that period and remove it\n white_out = detrend.pre_whiten(self.time, self.corrected_flux, \n self.corrected_unc, self.corr_prot,\n which=\"phased\")\n detrended_flux = self.corrected_flux / white_out[2]\n\n self.corr_trend = white_out[2]\n self.sec_flux = detrended_flux\n self.sec_unc = self.corrected_unc\n\n # Run lomb-scargle again and re-measure the period\n fit_out = self._run_fit([self.time, self.sec_flux, self.sec_unc])\n self.sec_prot = fit_out[0]\n self.sec_power = fit_out[1]\n self.sec_periods = fit_out[2]\n self.sec_pgram = fit_out[3]\n self.sec_sigmas = fit_out[5]\n\n eval_out = evaluate.test_pgram(self.sec_periods, self.sec_pgram,\n self.power_threshold)\n plot_aliases = [None, eval_out[2]]\n\n white_out2 = detrend.pre_whiten(self.time, self.sec_flux, \n self.sec_unc, self.sec_prot,\n which=\"phased\")\n self.sec_trend = white_out2[2]\n\n # Plot!\n if to_plot:\n # Plot them up\n lcs = [[self.time, 
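`_xy_correct` above divides each flux sample by the median flux of its `n_closest` nearest pixel positions, using `np.argpartition` to find those neighbours without a full sort. The core of that correction for a single sample, on hypothetical data:

```python
import numpy as np

def correct_one(flux_i, x_i, y_i, flux, x, y, n_closest=5):
    # argpartition places the n_closest smallest separations first
    # without fully sorting, as in _xy_correct above.
    pix_sep = np.sqrt((x_i - x) ** 2 + (y_i - y) ** 2)
    nearest = np.argpartition(pix_sep, n_closest)[:n_closest]
    return flux_i / np.median(flux[nearest])

rng = np.random.default_rng(0)                 # hypothetical data
flux = rng.uniform(0.9, 1.1, size=50)
x, y = rng.uniform(0, 10, size=50), rng.uniform(0, 10, size=50)
print(correct_one(flux[0], x[0], y[0], flux, x, y))  # close to 1.0
```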
self.corrected_flux, self.corrected_unc],\n [self.time, self.sec_flux, self.sec_unc]]\n pgrams = [[self.corr_periods, self.corr_pgram], \n [self.sec_periods, self.sec_pgram]]\n best_periods = [self.corr_prot, self.sec_prot]\n data_labels = [\"Corrected\", \"Fund. Prot=\"\n \"{0:.2f}d Removed\".format(self.corr_prot)]\n sigmas = [self.corr_sigmas, self.sec_sigmas]\n rd_fig, rd_axes = plot.compare_multiple(lcs, pgrams, best_periods, \n sigmas, \n aliases=plot_aliases,\n data_labels=data_labels, \n phase_by=self.sec_prot)\n\n rd_fig.suptitle(self.name, fontsize=\"large\", y=0.99)\n\n rd_fig.delaxes(rd_axes[3])\n\n rd_axes[0].plot(self.time, white_out[2], 'b-', lw=2)\n\n plt.savefig(\"{0}plot_outputs/{1}_second_period.png\".format(\n base_path,self.name))\n \n","repo_name":"stephtdouglas/k2spin","sub_path":"lc.py","file_name":"lc.py","file_ext":"py","file_size_in_byte":19676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24194661024","text":"import sys\r\nimport traceback\r\n\r\nimport discord\r\nfrom discord.ext import commands\r\nfrom utils import errors\r\n\r\n\r\nclass CommandErrorHandler(commands.Cog):\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n @commands.Cog.listener()\r\n async def on_command_error(self, ctx, error):\r\n command_not_run = \"Command couldn't be run!\"\r\n command_failed = \"Command failed!\"\r\n\r\n cog = ctx.cog\r\n if cog:\r\n if cog._get_overridden_method(cog.cog_command_error) is not None:\r\n return\r\n\r\n ignored_errors = (commands.CommandNotFound,)\r\n error = getattr(error, \"original\", error)\r\n\r\n if isinstance(error, ignored_errors):\r\n return\r\n\r\n if hasattr(ctx.command, \"handled_errors\") and isinstance(error, ctx.command.handled_errors):\r\n return\r\n\r\n if isinstance(error, commands.DisabledCommand):\r\n await ctx.send(embed=discord.Embed(\r\n title=command_not_run,\r\n description=f\"`{self.bot.command_prefix}{ctx.command}` has been disabled.\",\r\n color=discord.Color(0xff0000)))\r\n\r\n elif isinstance(error, commands.NoPrivateMessage):\r\n try:\r\n await ctx.author.send(embed=discord.Embed(\r\n title=command_not_run,\r\n description=f\"`{self.bot.command_prefix}{ctx.command}` cannot be used in Private Messages.\",\r\n color=discord.Color(0xff0000)))\r\n except discord.HTTPException:\r\n pass\r\n\r\n elif isinstance(error, commands.NotOwner):\r\n await ctx.send(embed=discord.Embed(\r\n title=command_not_run,\r\n description=f\"`{self.bot.command_prefix}{ctx.command}` may only be used by the bot owner.\",\r\n color=discord.Color(0xff0000)))\r\n\r\n elif isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(embed=discord.Embed(\r\n title=command_not_run,\r\n description=f\"{error}\\nCommand usage: `{self.bot.command_prefix}{ctx.command.qualified_name} {ctx.command.signature}`\",\r\n color=discord.Color(0xff0000)))\r\n\r\n elif isinstance(error, errors.NotTrustee):\r\n await ctx.send(embed=discord.Embed(\r\n title=command_not_run,\r\n description=f\"`{self.bot.command_prefix}{ctx.command}` may only be used by Trustees or the guild owner.\",\r\n color=discord.Color(0xff0000)))\r\n\r\n elif isinstance(error, errors.CommandFailed):\r\n await ctx.send(embed=discord.Embed(\r\n title=command_failed,\r\n description=str(error),\r\n color=discord.Color(0xff0000)))\r\n\r\n elif isinstance(error, (commands.MissingPermissions, commands.BadArgument, commands.CheckFailure)):\r\n await ctx.send(embed=discord.Embed(\r\n title=command_not_run,\r\n 
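The error handler above runs `error = getattr(error, "original", error)` before any isinstance checks; discord.py wraps exceptions raised inside a command body in `CommandInvokeError`, keeping the real exception on `.original`, and `getattr` falls through unchanged when there is no wrapper. A dependency-free sketch of the unwrap:

```python
class CommandInvokeError(Exception):
    # Stand-in for the discord.py wrapper: the exception raised
    # inside the command body lives on .original.
    def __init__(self, original):
        self.original = original

wrapped = CommandInvokeError(ValueError("bad input"))
plain = KeyError("nope")

print(type(getattr(wrapped, "original", wrapped)).__name__)  # ValueError
print(type(getattr(plain, "original", plain)).__name__)      # KeyError
```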
description=str(error),\r\n color=discord.Color(0xff0000)))\r\n\r\n else:\r\n print(f\"\\nIgnoring exception in command {ctx.command}:\", file=sys.stderr)\r\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(CommandErrorHandler(bot))\r\n\r\n\r\ndef teardown(bot):\r\n bot.remove_cog(\"CommandErrorHandler\")\r\n","repo_name":"object-Object/GuildBot","sub_path":"extensions/error_handler.py","file_name":"error_handler.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"27053385977","text":"import logging\nimport sys\n\nfrom Design.Components.Wing import Wing\nfrom Design.Aircraft import Aircraft\n\ndef main():\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n\n logging.getLogger('matplotlib.font_manager').disabled = True\n\n span = [0.1, 0.9, 0.2, 0.1]\n chord = [0.2, 0.2, 0.14, 0.1, 0.05]\n twist = 0\n sweep = [3, 5, 10, 45]\n polyhedral = 1\n\n span_h = [0.2, 0.1]\n chord_h = [0.14, 0.10, 0.06]\n twist_h = 0\n sweep_h = [10]\n polyhedral_h = 1\n\n wing = Wing(sub_type=Wing.SubType.MAIN_WING, span=span, chord=chord, twist=twist, sweep=sweep,\n polyhedral=polyhedral, logger=logger)\n\n horz_stab = Wing(sub_type=Wing.SubType.HORZ_STAB, span=span_h, chord=chord_h, twist=twist_h, sweep=sweep_h,\n polyhedral=polyhedral_h, logger=logger)\n\n logger.info('Wing Area: %sm²' % wing.area)\n logger.info('Wing Projected Area: %sm²' % wing.projected_area)\n logger.info('Wing MAC: %sm' % wing.mac)\n logger.info('Wing AR: %s' % wing.ar)\n\n aircraft = Aircraft(name='Yolo Plane', logger=logger)\n aircraft.add_component(wing, (0,0,0))\n aircraft.add_component(horz_stab, (-1.2,0,0))\n\n Aircraft.plot_planform(aircraft)\n\nmain()","repo_name":"fureter/VentumAeroToolspy","sub_path":"Tester.py","file_name":"Tester.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29079902945","text":"import numpy as np\nfrom qtpy import QtCore\n\nfrom scipy.ndimage import gaussian_filter1d\n\nfrom .RamanModel import RamanModel\nfrom .Spectrum import Spectrum\n\n\nclass DiamondModel(RamanModel):\n pressure_changed = QtCore.Signal(float)\n\n def __init__(self):\n super(DiamondModel, self).__init__()\n\n self._reference_position = 1334.\n self._sample_position = 1334.\n\n def get_pressure(self):\n K = 547\n Kp = 3.75\n\n P = (K * (self.sample_position - self.reference_position) / self.reference_position) * \\\n (1 + 0.5 * (Kp - 1) * (self.sample_position - self.reference_position) / self.reference_position)\n return P\n\n def calculate_derivative_spectrum(self, smoothing):\n if self.spe_file is None:\n return None\n original_spectrum = self.spectrum\n derivative_spectrum = Spectrum(np.copy(original_spectrum.x), np.gradient(original_spectrum.y))\n derivative_spectrum._y = gaussian_filter1d(derivative_spectrum.y, smoothing)\n derivative_spectrum._y = float((max(original_spectrum.y) - min(original_spectrum.y))) / (\n max(derivative_spectrum.y) - min(derivative_spectrum.y)) * derivative_spectrum.y\n derivative_spectrum._y = derivative_spectrum.y + min(original_spectrum.y) - min(derivative_spectrum.y)\n return derivative_spectrum\n\n @property\n def reference_position(self):\n return self._reference_position\n\n 
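`get_pressure` in the DiamondModel record above is the diamond Raman-edge pressure gauge with K = 547 and K' = 3.75 (pressure in GPa, line positions in cm^-1). Worked through on hypothetical reference and sample positions:

```python
K, Kp = 547.0, 3.75
reference, sample = 1334.0, 1340.0   # hypothetical Raman shifts, cm^-1

delta = (sample - reference) / reference
pressure = K * delta * (1 + 0.5 * (Kp - 1) * delta)
print(round(pressure, 2))  # 2.48 -- about 2.5 GPa for a 6 cm^-1 shift
```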
@reference_position.setter\n def reference_position(self, value):\n self._reference_position = value\n self.pressure_changed.emit(self.get_pressure())\n\n @property\n def sample_position(self):\n return self._sample_position\n\n @sample_position.setter\n def sample_position(self, value):\n self._sample_position = value\n self.pressure_changed.emit(self.get_pressure())\n","repo_name":"CPrescher/T-rax","sub_path":"t_rax/model/DiamondModel.py","file_name":"DiamondModel.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"20410162070","text":"# Import required modules\r\nfrom selenium import webdriver\r\n\r\n# Set up the WebDriver\r\ndriver = webdriver.Chrome()\r\n\r\n# Navigate to the webpage\r\ndriver.get(\"https://www.example.com\")\r\n\r\n\r\n# Using send_keys()\r\ndef entering_keys():\r\n element = driver.find_element_by_xpath(\"//input[@id='myInput']\")\r\n element.send_keys(\"Hello, World!\")\r\n\r\n\r\n# Using JavaScript executor\r\ndef entering_keys_using_javascript_executor():\r\n element = driver.find_element_by_xpath(\"//input[@id='myInput']\")\r\n driver.execute_script(\"arguments[0].value = 'Hello, World!';\", element)\r\n\r\n\r\n# Close the WebDriver\r\ndriver.quit()\r\n\r\n\"\"\"\r\nYes i could find duplicate code from the above given code :\r\nMethod name : entering_keys , entering_keys_using_javascript_executor are duplicate to each other\r\n\"\"\"\r\n","repo_name":"keshav12-deloitte/Duplicates_Data","sub_path":"DuploChecker/Selenium_duplicates/Selenium_duplicates_data/selenium_SA4.py","file_name":"selenium_SA4.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1094998878","text":"\"\"\"Module for the external comfort package, handling simulation of shaded/\nunshaded surface temperatures in an abstract \"openfield\" condition.\"\"\"\n# pylint: disable=E0401\nimport json\nfrom pathlib import Path\nfrom dataclasses import dataclass\nfrom typing import Any\n\n# pylint: enable=E0401\nfrom caseconverter import pascalcase\nimport numpy as np\nimport pandas as pd\nfrom honeybee.config import folders as hb_folders\nfrom honeybee.model import Model\nfrom honeybee_energy.dictutil import dict_to_material\nfrom honeybee_energy.material.opaque import EnergyMaterial, EnergyMaterialVegetation\nfrom honeybee_energy.run import run_idf, run_osw, to_openstudio_osw\nfrom honeybee_energy.simulation.parameter import (\n ShadowCalculation,\n SimulationControl,\n SimulationOutput,\n SimulationParameter,\n)\nfrom ladybug.epw import EPW, HourlyContinuousCollection\nfrom ladybug.futil import nukedir\nfrom ladybug_comfort.collection.solarcal import OutdoorSolarCal, SolarCalParameter\n\nfrom ..bhom.logging import CONSOLE_LOGGER\nfrom ..bhom.to_bhom import (\n hourlycontinuouscollection_to_bhom,\n material_to_bhom,\n)\nfrom ..honeybee_extension.results import load_sql\nfrom ..ladybug_extension.datacollection import (\n collection_from_series,\n collection_to_series,\n)\nfrom ..ladybug_extension.epw import epw_to_dataframe\nfrom ..ladybug_extension.epw import equality as epw_equality\nfrom ..ladybug_extension.groundtemperature import energyplus_strings\nfrom .model import create_model, get_ground_reflectance, model_equality\nfrom ..helpers import convert_keys_to_snake_case, sanitise_string\nfrom .material import Materials\n\n\ndef simulation_id(\n epw_file: Path,\n ground_material: EnergyMaterial | 
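The property setters at the top of this chunk re-emit `pressure_changed` on every assignment so the UI recomputes. The same pattern with the Qt signal replaced by a plain callback, so the sketch runs without qtpy:

```python
class Gauge:
    def __init__(self, on_change):
        self._sample_position = 1334.0
        self.on_change = on_change   # stands in for pressure_changed.emit

    @property
    def sample_position(self):
        return self._sample_position

    @sample_position.setter
    def sample_position(self, value):
        self._sample_position = value
        self.on_change(value)        # fire on every assignment

g = Gauge(lambda v: print("recompute pressure for", v))
g.sample_position = 1340.0   # prints: recompute pressure for 1340.0
```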
EnergyMaterialVegetation,\n shade_material: EnergyMaterial | EnergyMaterialVegetation,\n) -> str:\n \"\"\"Create an ID for a simulation.\n\n Args:\n epw_file (Path): The path to the EPW file.\n ground_material (EnergyMaterial | EnergyMaterialVegetation): The ground material.\n shade_material (EnergyMaterial | EnergyMaterialVegetation): The shade material.\n\n Returns:\n str: The simulation ID.\n \"\"\"\n\n epw_id = sanitise_string(epw_file.stem)\n ground_material_id = sanitise_string(ground_material.identifier)\n shade_material_id = sanitise_string(shade_material.identifier)\n return f\"{epw_id}__{ground_material_id}__{shade_material_id}\"\n\n\ndef simulation_directory(model: Model) -> Path:\n \"\"\"Get the working directory (where simulation results will be stored) for the given model, and\n create it if it doesn't already exist.\n\n Args:\n model (Model): A honeybee Model.\n\n Returns:\n Path: The simulation directory associated with the given model.\n \"\"\"\n\n working_dir: Path = Path(hb_folders.default_simulation_folder) / model.identifier\n working_dir.mkdir(parents=True, exist_ok=True)\n\n return working_dir\n\n\ndef simulate_surface_temperatures(\n model: Model, epw_file: Path, remove_dir: bool = False\n) -> dict[str, HourlyContinuousCollection]:\n \"\"\"Simulate surface temperatures for a Honeybee Model and return the\n resulting SQL results file path.\n\n Args:\n model (Model): A honeybee Model.\n epw_file (Path): The path to an EPW file.\n remove_dir (bool, optional): Set to True to remove the simulation\n directory\n\n Returns:\n dict[str, HourlyContinuousCollection]: Surface temperature results.\n \"\"\"\n\n if not isinstance(model, Model):\n raise ValueError(\"model must be a Honeybee Model.\")\n\n epw_file = Path(epw_file)\n if not epw_file.exists():\n raise ValueError(\"epw_file must be a valid file path.\")\n epw = EPW(epw_file)\n\n sim_dir = simulation_directory(model)\n\n # does the epw file already exist in the sim dir\n epws_match = False\n existing_epws = list(sim_dir.glob(\"*.epw\"))\n if len(existing_epws) >= 1:\n for existing_epw in existing_epws:\n if epw_equality(epw, EPW(existing_epw), include_header=True):\n # print(\n # f\"{epw} is the same as {EPW(existing_epw)} ({existing_epw.relative_to(sim_dir)})\"\n # )\n epws_match = True\n else:\n existing_epw.unlink()\n saved_epw = (sim_dir / epw_file.name).as_posix()\n epw.save(saved_epw)\n\n # do the models match\n models_match = False\n existing_models = list(sim_dir.glob(\"*.hbjson\"))\n if len(existing_models) >= 1:\n for existing_model in existing_models:\n if model_equality(model, Model.from_hbjson(existing_model)):\n models_match = True\n else:\n existing_model.unlink()\n\n # does the sql_path exist\n sql_path = sim_dir / \"run\" / \"eplusout.sql\"\n sql_exists = sql_path.exists()\n\n # check for existing results and reload if they exist\n if not all(\n [\n sql_exists,\n models_match,\n epws_match,\n ]\n ):\n CONSOLE_LOGGER.info(f\"Simulating {model.identifier}\")\n model_json = sim_dir / f\"{model.identifier}.hbjson\"\n with open(model_json, \"w\", encoding=\"utf-8\") as fp:\n json.dump(model.to_dict(triangulate_sub_faces=True), fp)\n\n sim_par = SimulationParameter(\n output=SimulationOutput(\n outputs=[\"Surface Outside Face Temperature\"],\n include_sqlite=True,\n summary_reports=None,\n include_html=False,\n ),\n simulation_control=SimulationControl(\n do_zone_sizing=False,\n do_system_sizing=False,\n do_plant_sizing=False,\n run_for_sizing_periods=False,\n run_for_run_periods=True,\n ),\n 
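`simulation_id` above builds a filesystem-safe cache key from the EPW file stem and the two material identifiers, joined with double underscores. A sketch with `sanitise_string` approximated by a regex (the real helper lives in `..helpers`; all names below are hypothetical):

```python
import re
from pathlib import Path

def sanitise(text: str) -> str:
    # Rough stand-in for sanitise_string: keep alphanumerics,
    # collapse everything else to single underscores.
    return re.sub(r"[^0-9A-Za-z]+", "_", text).strip("_")

epw_file = Path("GBR_London.Gatwick.037760_IWEC.epw")   # hypothetical
parts = [sanitise(epw_file.stem),
         sanitise("Concrete Pavement"),                 # ground material id
         sanitise("Fabric Aluminium Shade")]            # shade material id
print("__".join(parts))
# GBR_London_Gatwick_037760_IWEC__Concrete_Pavement__Fabric_Aluminium_Shade
```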
shadow_calculation=ShadowCalculation(\n solar_distribution=\"FullExteriorWithReflections\",\n calculation_method=\"PolygonClipping\",\n calculation_update_method=\"Periodic\",\n maximum_figures=200,\n ),\n terrain_type=\"Country\",\n timestep=10,\n )\n sim_par_json = sim_dir / \"simulation_parameter.json\"\n with open(sim_par_json, \"w\", encoding=\"utf-8\") as fp:\n json.dump(sim_par.to_dict(), fp)\n\n osw = to_openstudio_osw(\n sim_dir.as_posix(),\n model_json.as_posix(),\n sim_par_json.as_posix(),\n additional_measures=None,\n epw_file=epw.file_path,\n )\n\n _, idf = run_osw(osw, silent=True)\n\n with open(idf, \"r\", encoding=\"utf-8\") as fp:\n idf_string = fp.read()\n idf_string += f\"\\n\\n{energyplus_strings(epw)}\"\n\n with open(idf, \"w\", encoding=\"utf-8\") as fp:\n idf_string = fp.write(idf_string)\n\n run_idf(idf, epw.file_path, silent=False)\n\n else:\n CONSOLE_LOGGER.info(f\"Reloading {model.identifier}\")\n\n df = load_sql(sql_path)\n\n if remove_dir:\n nukedir(sim_dir, rmdir=True)\n\n return {\n \"shaded_down_temperature\": collection_from_series(\n df.filter(regex=\"GROUND_ZONE_UP_SHADED\")\n .droplevel([0, 1, 2], axis=1)\n .squeeze()\n .rename(\"Ground Temperature (C)\")\n ),\n \"unshaded_down_temperature\": collection_from_series(\n df.filter(regex=\"GROUND_ZONE_UP_UNSHADED\")\n .droplevel([0, 1, 2], axis=1)\n .squeeze()\n .rename(\"Ground Temperature (C)\")\n ),\n \"shaded_up_temperature\": collection_from_series(\n df.filter(regex=\"SHADE_ZONE_DOWN\")\n .droplevel([0, 1, 2], axis=1)\n .squeeze()\n .rename(\"Sky Temperature (C)\")\n ),\n \"unshaded_up_temperature\": epw.sky_temperature,\n }\n\n\ndef radiant_temperature(\n collections: list[HourlyContinuousCollection], view_factors: list[float] = None\n) -> HourlyContinuousCollection:\n \"\"\"Calculate the MRT from a list of surface temperature collections, and view\n factors to each of those surfaces.\n\n Args:\n collections (List[HourlyContinuousCollection]):\n A list of hourly continuous collections.\n view_factors (List[float]):\n A list of view factors to each of the collections.\n If None, then all input collections are weighted equally.\n\n Returns:\n HourlyContinuousCollection:\n An HourlyContinuousCollection of the effective radiant temperature.\n \"\"\"\n\n if view_factors is None:\n view_factors = [1 / len(collections)] * len(collections)\n if len(collections) != len(view_factors):\n raise ValueError(\"The number of collections and view factors must be the same.\")\n if sum(view_factors) != 1:\n raise ValueError(\"The sum of view factors must be 1.\")\n\n mrt_series = (\n np.power(\n (\n np.power(\n pd.concat([collection_to_series(i) for i in collections], axis=1)\n + 273.15,\n 4,\n )\n * view_factors\n ).sum(axis=1),\n 0.25,\n )\n - 273.15\n )\n mrt_series.name = \"Radiant Temperature (C)\"\n return collection_from_series(mrt_series)\n\n\n_ATTRIBUTES = [\n \"shaded_down_temperature\",\n \"shaded_up_temperature\",\n \"unshaded_down_temperature\",\n \"unshaded_up_temperature\",\n \"shaded_radiant_temperature\",\n \"shaded_longwave_mean_radiant_temperature_delta\",\n \"shaded_shortwave_mean_radiant_temperature_delta\",\n \"shaded_mean_radiant_temperature\",\n \"unshaded_radiant_temperature\",\n \"unshaded_longwave_mean_radiant_temperature_delta\",\n \"unshaded_shortwave_mean_radiant_temperature_delta\",\n \"unshaded_mean_radiant_temperature\",\n]\n\n\n@dataclass(init=True, repr=True, eq=True)\nclass SimulationResult:\n \"\"\"_\"\"\"\n\n epw_file: Path\n ground_material: EnergyMaterial | 
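`radiant_temperature` above averages surface temperatures in fourth-power Kelvin space, weighted by view factors, which is why the result is not a plain arithmetic mean. The formula on bare floats:

```python
import numpy as np

surface_temps = np.array([30.0, 10.0])   # degrees C (e.g. ground, sky)
view_factors = np.array([0.5, 0.5])      # must sum to 1

# MRT = (sum_i VF_i * (T_i + 273.15)^4)^(1/4) - 273.15
mrt = np.sum(view_factors * (surface_temps + 273.15) ** 4) ** 0.25 - 273.15
print(round(mrt, 2))  # 20.51 -- slightly above the arithmetic mean of 20
```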
EnergyMaterialVegetation\n shade_material: EnergyMaterial | EnergyMaterialVegetation\n identifier: str = None\n\n shaded_down_temperature: HourlyContinuousCollection = None\n shaded_up_temperature: HourlyContinuousCollection = None\n\n unshaded_down_temperature: HourlyContinuousCollection = None\n unshaded_up_temperature: HourlyContinuousCollection = None\n\n shaded_radiant_temperature: HourlyContinuousCollection = None\n shaded_longwave_mean_radiant_temperature_delta: HourlyContinuousCollection = None\n shaded_shortwave_mean_radiant_temperature_delta: HourlyContinuousCollection = None\n shaded_mean_radiant_temperature: HourlyContinuousCollection = None\n\n unshaded_radiant_temperature: HourlyContinuousCollection = None\n unshaded_longwave_mean_radiant_temperature_delta: HourlyContinuousCollection = None\n unshaded_shortwave_mean_radiant_temperature_delta: HourlyContinuousCollection = None\n unshaded_mean_radiant_temperature: HourlyContinuousCollection = None\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.identifier})\"\n\n def __post_init__(self):\n \"\"\"_\"\"\"\n\n # validation\n if not isinstance(self.epw_file, (Path, str)):\n raise ValueError(\"epw_file must be a Path or str.\")\n self.epw_file = Path(self.epw_file).resolve()\n if not self.epw_file.exists():\n raise ValueError(\"epw_file does not exist.\")\n\n if isinstance(self.ground_material, Materials):\n self.ground_material = self.ground_material.value\n if isinstance(self.shade_material, Materials):\n self.shade_material = self.shade_material.value\n\n if not isinstance(\n self.ground_material, (EnergyMaterial, EnergyMaterialVegetation)\n ):\n raise ValueError(\n \"ground_material must be an EnergyMaterial or EnergyMaterialVegetation.\"\n )\n if not isinstance(\n self.shade_material, (EnergyMaterial, EnergyMaterialVegetation)\n ):\n raise ValueError(\n \"shade_material must be an EnergyMaterial or EnergyMaterialVegetation.\"\n )\n\n if self.identifier is None:\n self.identifier = simulation_id(\n self.epw_file, self.ground_material, self.shade_material\n )\n\n for attr in _ATTRIBUTES:\n if not isinstance(\n getattr(self, attr), (HourlyContinuousCollection, type(None))\n ):\n raise ValueError(\n f\"{attr} must be either an HourlyContinuousCollection, or None.\"\n )\n\n # run simulation and populate object with results if not already done\n _epw = EPW(self.epw_file)\n _model = create_model(\n identifier=self.identifier,\n ground_material=self.ground_material,\n shade_material=self.shade_material,\n )\n\n if not all(\n [\n self.shaded_down_temperature,\n self.unshaded_down_temperature,\n self.shaded_up_temperature,\n self.unshaded_up_temperature,\n ]\n ):\n results = simulate_surface_temperatures(\n model=_model,\n epw_file=self.epw_file,\n remove_dir=not bool(self.identifier),\n )\n for k, v in results.items():\n if isinstance(getattr(self, k), HourlyContinuousCollection):\n continue\n setattr(self, k, v)\n\n # calculate other variables\n self.shaded_radiant_temperature = radiant_temperature(\n [\n self.shaded_down_temperature,\n self.shaded_up_temperature,\n ],\n )\n self.unshaded_radiant_temperature = radiant_temperature(\n [\n self.unshaded_down_temperature,\n self.unshaded_up_temperature,\n ],\n )\n\n # calculate MRT\n params = SolarCalParameter()\n shaded_cal = OutdoorSolarCal(\n location=_epw.location,\n direct_normal_solar=_epw.direct_normal_radiation,\n diffuse_horizontal_solar=_epw.diffuse_horizontal_radiation,\n horizontal_infrared=_epw.horizontal_infrared_radiation_intensity,\n 
surface_temperatures=self.shaded_radiant_temperature,\n floor_reflectance=get_ground_reflectance(_model),\n sky_exposure=0,\n fraction_body_exposed=0,\n solarcal_body_parameter=params,\n )\n unshaded_cal = OutdoorSolarCal(\n location=_epw.location,\n direct_normal_solar=_epw.direct_normal_radiation,\n diffuse_horizontal_solar=_epw.diffuse_horizontal_radiation,\n horizontal_infrared=_epw.horizontal_infrared_radiation_intensity,\n surface_temperatures=self.unshaded_down_temperature,\n floor_reflectance=get_ground_reflectance(_model),\n sky_exposure=1,\n fraction_body_exposed=1,\n solarcal_body_parameter=params,\n )\n for shadedness, cal in list(\n zip(*[[\"shaded\", \"unshaded\"], [shaded_cal, unshaded_cal]])\n ):\n for var in [\n \"mean_radiant_temperature\",\n \"shortwave_mrt_delta\",\n \"longwave_mrt_delta\",\n ]:\n setattr(\n self,\n f\"{shadedness}_{var.replace('mrt', 'mean_radiant_temperature')}\",\n getattr(cal, var),\n )\n\n # add some accessors for collections as series\n for attr in _ATTRIBUTES:\n setattr(self, f\"{attr}_series\", collection_to_series(getattr(self, attr)))\n\n def to_dict(self) -> dict[str, Any]:\n \"\"\"Convert this object to a dictionary.\"\"\"\n ground_material_dict = self.ground_material.to_dict()\n shade_material_dict = self.shade_material.to_dict()\n\n attr_dict = {}\n for attr in _ATTRIBUTES:\n if getattr(self, attr):\n attr_dict[attr] = getattr(self, attr).to_dict()\n\n d = {\n **{\n \"type\": \"SimulationResult\",\n \"epw_file\": self.epw_file.as_posix(),\n \"ground_material\": ground_material_dict,\n \"shade_material\": shade_material_dict,\n \"identifier\": self.identifier,\n },\n **attr_dict,\n }\n\n return d\n\n @classmethod\n def from_dict(cls, d: dict[str, Any]) -> \"SimulationResult\":\n \"\"\"Create this object from a dictionary.\"\"\"\n if isinstance(d[\"ground_material\"], dict):\n d[\"ground_material\"] = dict_to_material(d[\"ground_material\"])\n\n if isinstance(d[\"shade_material\"], dict):\n d[\"shade_material\"] = dict_to_material(d[\"shade_material\"])\n\n for attr in _ATTRIBUTES:\n if d.get(attr, None):\n if isinstance(d[attr], dict):\n d[attr] = HourlyContinuousCollection.from_dict(d[attr])\n else:\n d[attr] = None\n\n return cls(\n epw_file=d[\"epw_file\"],\n ground_material=d[\"ground_material\"],\n shade_material=d[\"shade_material\"],\n identifier=d[\"identifier\"],\n shaded_down_temperature=d[\"shaded_down_temperature\"],\n shaded_up_temperature=d[\"shaded_up_temperature\"],\n unshaded_down_temperature=d[\"unshaded_down_temperature\"],\n unshaded_up_temperature=d[\"unshaded_up_temperature\"],\n shaded_radiant_temperature=d[\"shaded_radiant_temperature\"],\n shaded_longwave_mean_radiant_temperature_delta=d[\n \"shaded_longwave_mean_radiant_temperature_delta\"\n ],\n shaded_shortwave_mean_radiant_temperature_delta=d[\n \"shaded_shortwave_mean_radiant_temperature_delta\"\n ],\n shaded_mean_radiant_temperature=d[\"shaded_mean_radiant_temperature\"],\n unshaded_radiant_temperature=d[\"unshaded_radiant_temperature\"],\n unshaded_longwave_mean_radiant_temperature_delta=d[\n \"unshaded_longwave_mean_radiant_temperature_delta\"\n ],\n unshaded_shortwave_mean_radiant_temperature_delta=d[\n \"unshaded_shortwave_mean_radiant_temperature_delta\"\n ],\n unshaded_mean_radiant_temperature=d[\"unshaded_mean_radiant_temperature\"],\n )\n\n def to_json(self) -> str:\n \"\"\"Create a JSON string from this object.\"\"\"\n return json.dumps(self.to_dict())\n\n @classmethod\n def from_json(cls, json_string: str) -> \"SimulationResult\":\n \"\"\"Create this 
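The nested loop near the end of `__post_init__` above copies a fixed set of result fields off each SolarCal object onto `self`, prefixed by shadedness. The bare pattern, with a dummy calculator in place of `OutdoorSolarCal`:

```python
class FakeCal:                       # stand-in for OutdoorSolarCal
    mean_radiant_temperature = 25.0
    shortwave_mrt_delta = 4.0

class Result:
    pass

res = Result()
for case, cal in [("shaded", FakeCal()), ("unshaded", FakeCal())]:
    for var in ["mean_radiant_temperature", "shortwave_mrt_delta"]:
        # e.g. res.shaded_mean_radiant_temperature = cal.mean_radiant_temperature
        setattr(res, f"{case}_{var}", getattr(cal, var))

print(res.unshaded_shortwave_mrt_delta)  # 4.0
```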
object from a JSON string.\"\"\"\n\n return cls.from_dict(json.loads(json_string))\n\n def to_file(self, path: Path) -> Path:\n \"\"\"Write this object to a JSON file.\"\"\"\n\n if Path(path).suffix != \".json\":\n raise ValueError(\"path must be a JSON file.\")\n\n with open(Path(path), \"w\") as fp:\n fp.write(self.to_json())\n\n return Path(path)\n\n @classmethod\n def from_file(cls, path: Path) -> \"SimulationResult\":\n \"\"\"Create this object from a JSON file.\"\"\"\n\n with open(Path(path), \"r\") as fp:\n return cls.from_json(fp.read())\n\n @property\n def epw(self) -> EPW:\n \"\"\"Return the EPW object associated with this simulation result.\"\"\"\n return EPW(self.epw_file)\n\n @property\n def simulation_directory(self) -> Path:\n \"\"\"Return the simulation directory for this simulation result.\"\"\"\n return simulation_directory(self.model)\n\n @property\n def model(self) -> Model:\n \"\"\"Return the model object for this simulation result.\"\"\"\n return create_model(\n identifier=self.identifier,\n ground_material=self.ground_material,\n shade_material=self.shade_material,\n )\n\n def to_dataframe(self) -> pd.DataFrame:\n \"\"\"Create a Pandas DataFrame from this object.\n\n Returns:\n pd.DataFrame: Represent this object as a Pandas DataFrame.\n \"\"\"\n\n obj_series = []\n for var in dir(self):\n for shadedness in [\"shaded\", \"unshaded\"]:\n if not var.startswith(shadedness):\n continue\n _temp = getattr(self, var)\n if isinstance(_temp, HourlyContinuousCollection):\n _temp = collection_to_series(_temp)\n _temp.rename(\n (shadedness.title(), _temp.name),\n inplace=True,\n )\n obj_series.append(_temp)\n\n obj_df = pd.concat(obj_series, axis=1)\n\n return pd.concat(\n [\n pd.concat(\n [epw_to_dataframe(self.epw, include_additional=True)],\n axis=1,\n keys=[\"EPW\"],\n ),\n obj_df,\n ],\n axis=1,\n )\n\n def description(self, include_shade_material: bool = True) -> str:\n \"\"\"Create the description for this object.\n\n Args:\n include_shade_material (bool, optional):\n Set to False to exclude the shade material from the description.\n Defaults to True.\n\n Returns:\n str:\n A description of this object.\n \"\"\"\n if include_shade_material:\n return (\n f\"{self.epw_file.name} - \"\n f\"{self.ground_material.identifier} (ground material) - \"\n f\"{self.shade_material.identifier} (shade material)\"\n )\n\n return (\n f\"{self.epw_file.name} - \"\n f\"{self.ground_material.identifier} (ground material)\"\n )\n","repo_name":"BHoM/LadybugTools_Toolkit","sub_path":"LadybugTools_Engine/Python/src/ladybugtools_toolkit/external_comfort/_simulatebase.py","file_name":"_simulatebase.py","file_ext":"py","file_size_in_byte":21640,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"36090865910","text":"# def max_magnitude(l):\n# return abs(max(l, key=lambda n: abs(n)))\n\n\ndef max_magnitude(l):\n return max(abs(n) for n in l)\n\n\ndef sum_even_values(*args):\n return sum(n for n in args if n % 2 == 0)\n\n\ndef interleave(s1, s2):\n return ''.join([''.join(t) for t in zip(s1, s2)])\n\n\n\nnum = [300, 20, -900]\nprint(max_magnitude(num))\nsoma = (sum_even_values(1, 2, 3, 4, 5, 6))\nprint(soma)\n\nprint('- ' * 15)\nst1 = 'aaa'\nst2 = 'zzz'\nprint(interleave(st1, st2))\nprint('- ' * 15)\nnums = [1, 2, 3, 4, 5]\n# new_nums = [n * 3 for n in nums if n % 4 == 0]\nnew_nums = list(\n map(lambda n: n * 3,\n filter(lambda x: x % 4 == 0, nums)\n )\n)\nprint(new_nums)\n\nprint('- ' * 15)\nnames = [\n {'first': 'Elie', 'last': 
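`to_dataframe` above groups the EPW columns under a top-level "EPW" key by wrapping the frame in `pd.concat(..., keys=[...])` before joining it with the result columns. A minimal sketch of that column-grouping trick on hypothetical frames:

```python
import pandas as pd

epw_df = pd.DataFrame({"Dry Bulb Temperature (C)": [10.0, 11.0]})
obj_df = pd.DataFrame({("Shaded", "Mean Radiant Temperature (C)"): [12.0, 13.0]})

# keys=["EPW"] adds a top level to epw_df's columns, so both frames
# end up with a two-level column MultiIndex after the outer concat:
out = pd.concat([pd.concat([epw_df], axis=1, keys=["EPW"]), obj_df], axis=1)
print(out.columns.tolist())
# [('EPW', 'Dry Bulb Temperature (C)'), ('Shaded', 'Mean Radiant Temperature (C)')]
```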
'Schoppik'},\n {'first': 'Colt', 'last': 'Steele'}\n]\n# full_names = [f'{n[\"first\"]} {n[\"last\"]}' for n in names]\ntest = list(\n map(\n lambda n: ' '.join(n.values()), names\n )\n)\nprint(test)\n\n\n","repo_name":"mhiloca/PythonBootcamp","sub_path":"lambdas_builtin_functions/exercises.py","file_name":"exercises.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74810008585","text":"import os\nimport re\nimport sys\nimport zlib\nimport time\nimport copy\nimport random\nimport textwrap\nimport binascii\nimport socket\nimport afpacket\nimport traceback\nimport ipaddress\n\nfrom binascii import hexlify\n\nthis_dir = os.path.join(os.path.dirname(__file__))\n\nfrom scapy.all import hexdump, L2Socket\nfrom scapy.packet import Padding\nfrom scapy.layers.l2 import Ether, Dot1Q, ARP\nfrom scapy.layers.inet import IP, UDP, TCP, ICMP\nfrom scapy.layers.inet6 import IPv6, ICMPv6ND_NA\nfrom scapy.contrib.igmp import IGMP\nfrom scapy.config import Conf\nfrom dicts import SpyTestDict\nfrom utils import Utils\nfrom logger import Logger\n\n#dbg > 1 --- recv/send packet\n#dbg > 2 --- recv/send packet summary\n#dbg > 3 --- recv/send packet hex\n\nstale_list_ignore = [\n \"port_handle\",\n \"port_handle2\",\n \"stream_id\",\n \"mode\",\n \"rate_percent\", #TODO\n\n #\n \"circuit_endpoint_type\",\n \"enable_stream_only_gen\",\n #\n\n # filtered stats\n \"high_speed_result_analysis\", #TODO\n \"vlan_id_tracking\",\n \"ip_dscp_tracking\",\n \"track_by\",\n # filtered stats\n\n \"ip_protocol\", #TODO\n\n \"vlan_id\",\n \"vlan_id_mode\",\n \"vlan_id_step\",\n \"vlan_id_count\",\n\n \"mac_src\",\n \"mac_src_mode\",\n \"mac_src_step\",\n \"mac_src_count\",\n \"mac_dst\",\n \"mac_dst_mode\",\n \"mac_dst_step\",\n \"mac_dst_count\",\n\n \"udp_src_port\",\n \"udp_src_port_mode\",\n \"udp_src_port_step\",\n \"udp_src_port_count\",\n \"udp_dst_port\",\n \"udp_dst_port_mode\",\n \"udp_dst_port_step\",\n \"udp_dst_port_count\",\n\n \"tcp_src_port\",\n \"tcp_src_port_mode\",\n \"tcp_src_port_step\",\n \"tcp_src_port_count\",\n \"tcp_dst_port\",\n \"tcp_dst_port_mode\",\n \"tcp_dst_port_step\",\n \"tcp_dst_port_count\",\n\n \"arp_src_hw_addr\",\n \"arp_src_hw_mode\",\n \"arp_src_hw_step\",\n \"arp_src_hw_count\",\n \"arp_dst_hw_addr\",\n \"arp_dst_hw_mode\",\n \"arp_dst_hw_step\",\n \"arp_dst_hw_count\",\n\n \"ip_src_addr\",\n \"ip_src_mode\",\n \"ip_src_step\",\n \"ip_src_count\",\n \"ip_dst_addr\",\n \"ip_dst_mode\",\n \"ip_dst_step\",\n \"ip_dst_count\",\n\n \"ipv6_src_addr\",\n \"ipv6_src_mode\",\n \"ipv6_src_step\",\n \"ipv6_src_count\",\n \"ipv6_dst_addr\",\n \"ipv6_dst_mode\",\n \"ipv6_dst_step\",\n \"ipv6_dst_count\",\n]\n\nclass ScapyPacket(object):\n\n def __init__(self, iface, dbg=0, dry=False, hex=False, logger=None):\n self.dry = dry\n self.logger = logger or Logger()\n try: self.logger.info(\"SCAPY VERSION = {}\".format(Conf().version))\n except: self.logger.info(\"SCAPY VERSION = UNKNOWN\")\n self.utils = Utils(self.dry, logger=self.logger)\n self.max_rate_pps = self.utils.get_env_int(\"SPYTEST_SCAPY_MAX_RATE_PPS\", 100)\n self.dbg = dbg\n self.hex = hex\n self.iface = iface\n self.tx_count = 0\n self.rx_count = 0\n self.rx_sock = None\n self.tx_sock = None\n self.finished = False\n self.cleanup()\n if iface and not self.dry:\n self.init_bridge(iface)\n bridge = \"{0}-br\".format(iface)\n #os.system(\"brctl addbr {0}\".format(bridge))\n os.system(\"ip link add name {0} type bridge\".format(bridge))\n 
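The last pipeline in the exercises record above joins `n.values()` with a space, which only yields `"first last"` because dicts preserve insertion order (guaranteed since Python 3.7). An explicit f-string sidesteps that assumption:

```python
names = [
    {"first": "Elie", "last": "Schoppik"},
    {"first": "Colt", "last": "Steele"},
]
print([f"{n['first']} {n['last']}" for n in names])
# ['Elie Schoppik', 'Colt Steele']
```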
#os.system(\"brctl addif {0} {1}\".format(bridge, iface))\n os.system(\"ip link set dev {1} master {0}\".format(bridge, iface))\n # let bridge proxy arp packets\n #os.system(\"echo 1 > /proc/sys/net/ipv4/conf/{0}-br/proxy_arp\".format(iface))\n os.system(\"ip link set dev {0}-br up\".format(iface))\n os.system(\"ip link set dev {0} up\".format(iface))\n os.system(\"ip link set dev {0} promisc on\".format(iface))\n os.system(\"ip link set dev {0} mtu 9194\".format(iface))\n os.system(\"ip link set dev {0}-br mtu 9194\".format(iface))\n os.system(\"echo 0 > /sys/class/net/{0}-br/bridge/multicast_snooping\".format(iface))\n #os.system(\"ip link del {0}-rx\".format(iface))\n os.system(\"ip link add {0}-rx type dummy\".format(iface))\n os.system(\"ip link set dev {0}-rx mtu 9194\".format(iface))\n self.disable_ra_accept(\"{0}-rx\".format(iface))\n os.system(\"tc qdisc del dev {0} ingress\".format(iface))\n os.system(\"tc qdisc add dev {0} ingress\".format(iface))\n #os.system(\"tc filter del dev {0} parent ffff: protocol all u32 match u8 0 0 action mirred egress mirror dev {0}-rx\".format(iface))\n os.system(\"tc filter add dev {0} parent ffff: protocol all u32 match u8 0 0 action mirred egress mirror dev {0}-rx\".format(iface))\n os.system(\"ip link set {0}-rx up\".format(iface))\n if self.dbg > 2:\n os.system(\"ifconfig\")\n self.rx_open()\n\n def init_bridge(self, iface):\n if iface and not self.dry:\n bridge = \"{0}-br\".format(iface)\n os.system(\"ip link set dev {0} down\".format(bridge))\n #os.system(\"brctl delif {0} {1}\".format(bridge, iface))\n os.system(\"ip link set dev {1} nomaster\".format(bridge, iface))\n #os.system(\"brctl delbr {0}\".format(bridge))\n os.system(\"ip link del {0}\".format(bridge))\n os.system(\"ip link del {0}-rx\".format(iface))\n time.sleep(1)\n\n def disable_ra_accept(self, iface):\n os.system(\"sysctl -w net.ipv6.conf.{}.forwarding=0\".format(iface))\n os.system(\"sysctl -w net.ipv6.conf.all.forwarding=0\")\n os.system(\"sysctl -w net.ipv6.conf.{}.accept_ra=0\".format(iface))\n os.system(\"sysctl -w net.ipv6.conf.all.accept_ra=0\")\n os.system(\"sysctl -w net.ipv6.conf.{}.forwarding=0\".format(iface))\n\n def __del__(self):\n self.logger.info(\"packet cleanup todo: \", self.iface)\n self.cleanup()\n\n def clear_stats(self):\n self.tx_count = 0\n self.rx_count = 0\n\n def close_sock(self, sock):\n try: sock.close()\n except: pass\n return None\n\n def cleanup(self):\n self.logger.info(\"ScapyPacket {} cleanup...\".format(self.iface))\n self.finished = True\n self.init_bridge(self.iface)\n self.finished = False\n self.rx_sock = self.close_sock(self.rx_sock)\n self.tx_sock = self.close_sock(self.tx_sock)\n\n def rx_open(self):\n if not self.iface or self.dry: return\n ETH_P_ALL = 3\n self.rx_sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(ETH_P_ALL))\n self.rx_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 12 * 1024)\n self.rx_sock.bind((self.iface+\"-rx\", 3))\n afpacket.enable_auxdata(self.rx_sock)\n\n def readp(self, iface):\n\n if self.dry:\n time.sleep(2)\n return None\n\n if not self.iface:\n return None\n\n try:\n data = afpacket.recv(self.rx_sock, 12 * 1024)\n except Exception as exp:\n if self.finished:\n return None\n raise exp\n packet = Ether(data)\n self.rx_count = self.rx_count + 1\n self.trace_stats()\n\n if self.dbg > 1:\n self.logger.debug(\"readp: {} len: {} count: {}\".format(iface, len(data), self.rx_count))\n\n if self.dbg > 2:\n self.trace_packet(packet, self.hex)\n\n return packet\n\n def sendp(self, data, 
iface):\n self.tx_count = self.tx_count + 1\n self.trace_stats()\n\n if self.dbg > 1:\n self.logger.debug(\"sendp: {} len: {} count: {}\".format(iface, len(data), self.tx_count))\n\n if self.dbg > 2:\n self.trace_packet(data, self.hex)\n\n if not self.dry:\n #sendp(data, iface=iface, verbose=False)\n if not self.tx_sock:\n self.tx_sock = L2Socket(iface)\n self.tx_sock.send(data)\n\n def trace_stats(self):\n #self.logger.debug(\"Name: {} RX: {} TX: {}\".format(self.iface, self.rx_count, self.tx_count))\n pass\n\n def show_pkt(self, pkt):\n try:\n self.logger.debug(pkt.show2(dump=True))\n except:\n self.logger.debug(pkt.command())\n pkt.show2()\n\n def trace_packet(self, pkt, hex=True, fields=True):\n if not fields and not hex: return\n if isinstance(pkt, str): pkt = Ether(pkt)\n if fields: self.show_pkt(pkt)\n if hex: hexdump(pkt)\n\n def send_packet(self, pwa, iface):\n if pwa.padding:\n strpkt = str(pwa.pkt/pwa.padding)\n else:\n strpkt = str(pwa.pkt)\n pkt_bytes = self.utils.tobytes(strpkt)\n try:\n crc1 = '{:08x}'.format(socket.htonl(zlib.crc32(pkt_bytes) & 0xFFFFFFFF))\n crc = binascii.unhexlify(crc1)\n except:\n crc1='{:08x}'.format(socket.htonl(zlib.crc32(pkt_bytes) & 0xFFFFFFFF))\n crc = binascii.unhexlify('00' * 4)\n bstr = bytes(strpkt+crc)\n self.sendp(bstr, iface)\n return bstr\n\n def check(self, pkt):\n pkt.do_build()\n if self.dbg > 3:\n self.show_pkt(pkt)\n return pkt\n\n def pop_mac(self, d, prop, default):\n val = d.pop(prop, None)\n if not val:\n val = \"{}\".format(default)\n if isinstance(val, list):\n values = val\n else:\n values = val.split(\" \")\n for index,val in enumerate(values):\n values[index] = val.replace(\".\", \":\")\n return values\n\n def pop_str(self, d, prop, default):\n val = d.pop(prop, None)\n if not val:\n val = \"{}\".format(default)\n return val\n\n def pop_int(self, d, prop, default):\n val = d.pop(prop, \"{}\".format(default))\n try:\n return int(str(val))\n except:\n self.logger.info(traceback.format_exc())\n\n def pop_hex(self, d, prop, default):\n val = d.pop(prop, \"{}\".format(default))\n try:\n return int(str(val), 16)\n except:\n self.logger.info(traceback.format_exc())\n\n def get_int(self, d, prop, default):\n val = d.get(prop, \"{}\".format(default))\n try:\n return int(str(val))\n except:\n self.logger.info(traceback.format_exc())\n\n def ensure_int(self, name, value, min_val, max_val):\n if value < 0 or value > 13312:\n msg = \"invalid value {} = {} shoud be > {} and < {}\"\n msg = msg.format(name, value, min_val, max_val)\n self.logger.error(msg)\n\n def build_udp(self, kws):\n udp = UDP()\n udp.sport = self.pop_int(kws, \"udp_src_port\", 0)\n udp.dport = self.pop_int(kws, \"udp_dst_port\", 0)\n #udp.len\n #udp.chksum\n return udp\n\n def build_tcp(self, kws):\n tcp = TCP()\n tcp.sport = self.pop_int(kws, \"tcp_src_port\", 0)\n tcp.dport = self.pop_int(kws, \"tcp_dst_port\", 0)\n tcp.seq = self.pop_int(kws, \"tcp_seq_num\", 0)\n tcp.ack = self.pop_int(kws, \"tcp_ack_num\", 0)\n tcp.flags = ''\n if self.pop_int(kws, \"tcp_syn_flag\", 0):\n tcp.flags = str(tcp.flags) + 'S'\n if self.pop_int(kws, \"tcp_fin_flag\", 0):\n tcp.flags = str(tcp.flags) + 'F'\n if self.pop_int(kws, \"tcp_urg_flag\", 0):\n tcp.flags = str(tcp.flags) + 'U'\n if self.pop_int(kws, \"tcp_psh_flag\", 0):\n tcp.flags = str(tcp.flags) + 'P'\n if self.pop_int(kws, \"tcp_ack_flag\", 0):\n tcp.flags = str(tcp.flags) + 'A'\n if self.pop_int(kws, \"tcp_rst_flag\", 0):\n tcp.flags = str(tcp.flags) + 'R'\n #tcp.dataofs\n #tcp.reserved\n tcp.window = self.pop_int(kws, 
\"tcp_window\", 0)\n #tcp.chksum\n #tcp.urgptr\n #tcp.options\n return tcp\n\n def build_icmp(self, kws):\n icmp = ICMP()\n icmp.type = self.pop_int(kws, \"icmp_type\", 0)\n #icmp.code = Driver.getConstValue(icmpCfg.code)\n #TODO: icmp_type_count, icmp_type_mode\n return icmp\n\n def build_icmp6(self, kws):\n icmp_type = self.pop_int(kws, \"icmp_type\", 0)\n if icmp_type == 136:\n icmp = ICMPv6ND_NA()\n icmp.R = self.pop_int(kws, \"icmp_ndp_nam_r_flag\", 1)\n icmp.S = self.pop_int(kws, \"icmp_ndp_nam_s_flag\", 0)\n icmp.O = self.pop_int(kws, \"icmp_ndp_nam_o_flag\", 1)\n icmp.tgt = self.pop_str(kws, \"icmp_target_addr\", \"::\")\n else:\n icmp = ICMP()\n return icmp\n\n def build_igmp(self, kws):\n igmp = IGMP()\n #igmp.mrcode\n igmp.gaddr = self.pop_str(kws, \"igmp_group_addr\", \"0.0.0.0\")\n #igmp.chksum = 0\n igmp_msg_type = self.pop_str(kws, \"igmp_msg_type\", \"report\")\n if igmp_msg_type == \"report\":\n igmp.type = 0x16\n elif igmp_msg_type == \"query\":\n igmp.type = 0x11\n elif igmp_msg_type == \"leave\":\n igmp.type = 0x17\n else:\n self.logger.todo(\"unknown\", \"igmp_msg_type\", igmp_msg_type)\n igmp = None\n return igmp\n\n def fill_emulation_params(self, stream):\n\n # read params from emulation interfaces\n if \"emulation_src_handle\" in stream.kws:\n emulation_src_handle = stream.kws.pop(\"emulation_src_handle\")\n intf_ip_addr = emulation_src_handle.kws.get(\"intf_ip_addr\", \"0.0.0.0\")\n ipv6_intf_addr = emulation_src_handle.kws.get(\"ipv6_intf_addr\", \"\")\n count = self.utils.intval(emulation_src_handle.kws, \"count\", 1)\n if ipv6_intf_addr:\n stream.kws[\"l3_protocol\"] = \"ipv6\"\n stream.kws[\"ipv6_src_addr\"] = ipv6_intf_addr\n if count > 1:\n stream.kws[\"ipv6_src_count\"] = count\n stream.kws[\"ipv6_src_mode\"] = \"increment\"\n else:\n stream.kws[\"l3_protocol\"] = \"ipv4\"\n stream.kws[\"ip_src_addr\"] = intf_ip_addr\n stream.kws[\"ip_src_count\"] = count\n if count > 1:\n stream.kws[\"ip_src_count\"] = count\n stream.kws[\"ip_src_mode\"] = \"increment\"\n try:\n stream.kws[\"mac_src\"] = [self.get_my_mac(emulation_src_handle)]\n stream.kws[\"mac_dst\"] = [self.get_arp_mac(emulation_src_handle)]\n except Exception as exp:\n self.logger.info(exp)\n self.logger.info(traceback.format_exc())\n self.logger.debug(\"updated stream.kws-1 = {}\".format(stream.kws))\n\n if \"emulation_dst_handle\" in stream.kws:\n emulation_dst_handle = stream.kws.pop(\"emulation_dst_handle\")\n intf_ip_addr = emulation_dst_handle.kws.get(\"intf_ip_addr\", \"\")\n ipv6_intf_addr = emulation_dst_handle.kws.get(\"ipv6_intf_addr\", \"\")\n count = self.utils.intval(emulation_dst_handle.kws, \"count\", 1)\n if ipv6_intf_addr:\n stream.kws[\"l3_protocol\"] = \"ipv6\"\n stream.kws[\"ipv6_dst_addr\"] = ipv6_intf_addr\n if count > 1:\n stream.kws[\"ipv6_dst_count\"] = count\n stream.kws[\"ipv6_dst_mode\"] = \"increment\"\n else:\n stream.kws[\"l3_protocol\"] = \"ipv4\"\n stream.kws[\"ip_dst_addr\"] = intf_ip_addr\n if count > 1:\n stream.kws[\"ip_dst_count\"] = count\n stream.kws[\"ip_dst_mode\"] = \"increment\"\n self.logger.debug(\"updated stream.kws-2 = {}\".format(stream.kws))\n\n def build_first(self, stream):\n\n self.fill_emulation_params(stream)\n\n kws = copy.deepcopy(stream.kws)\n\n self.logger.info(\"=========== build_first this {} = {}\".format(stream.stream_id, kws))\n # fill default variables\n mac_src = self.pop_mac(kws, \"mac_src\", \"00:00:01:00:00:01\")\n mac_dst = self.pop_mac(kws, \"mac_dst\", \"00:00:00:00:00:00\")\n\n duration = self.pop_int(kws, \"duration\", 1)\n 
rate_pps = self.pop_int(kws, \"rate_pps\", 1)\n l2_encap = self.pop_str(kws, \"l2_encap\", \"\")\n l3_protocol = self.pop_str(kws, \"l3_protocol\", \"\")\n l4_protocol = self.pop_str(kws, \"l4_protocol\", \"\")\n vlan_en = self.pop_str(kws, \"vlan\", \"enable\")\n vlan_id = self.pop_int(kws, \"vlan_id\", 0)\n vlan_cfi = self.pop_int(kws, \"vlan_cfi\", 0)\n vlan_prio = self.pop_int(kws, \"vlan_user_priority\", 0)\n frame_size = self.pop_int(kws, \"frame_size\", 64)\n frame_size_min = self.pop_int(kws, \"frame_size_min\", 64)\n frame_size_max = self.pop_int(kws, \"frame_size_max\", 9210)\n frame_size_step = self.pop_int(kws, \"frame_size_step\", 64)\n transmit_mode = self.pop_str(kws, \"transmit_mode\", \"continuous\")\n data_pattern = self.pop_str(kws, \"data_pattern\", \"\")\n pkts_per_burst = self.pop_int(kws, \"pkts_per_burst\", 1)\n ethernet_value = self.pop_hex(kws, \"ethernet_value\", 0)\n length_mode = self.pop_str(kws, \"length_mode\", \"fixed\")\n l3_length = self.pop_int(kws, \"l3_length\", 110)\n data_pattern_mode = self.pop_str(kws, \"data_pattern_mode\", \"fixed\")\n\n mac_dst_mode = kws.get(\"mac_dst_mode\", \"fixed\").strip()\n if mac_dst_mode not in [\"fixed\", \"increment\", \"decrement\", \"list\"]:\n self.logger.error(\"unhandled option mac_dst_mode = {}\".format(mac_dst_mode))\n mac_src_mode = kws.get(\"mac_src_mode\", \"fixed\").strip()\n if mac_src_mode not in [\"fixed\", \"increment\", \"decrement\", \"list\"]:\n self.logger.error(\"unhandled option mac_src_mode = {}\".format(mac_src_mode))\n\n self.ensure_int(\"l3_length\", l3_length, 44, 16365)\n if length_mode in [\"random\", \"increment\", \"incr\"]:\n self.ensure_int(\"frame_size_min\", frame_size_min, 44, 13312)\n self.ensure_int(\"frame_size_max\", frame_size_max, 44, 13312)\n self.ensure_int(\"frame_size_step\", frame_size_step, 0, 13312)\n elif length_mode != \"fixed\":\n self.logger.error(\"unhandled option length_mode = {}\".format(length_mode))\n\n if data_pattern_mode != \"fixed\":\n self.logger.error(\"unhandled option data_pattern_mode = {}\".format(data_pattern_mode))\n\n # update parsed values\n stream.kws[\"mac_src\"] = mac_src\n stream.kws[\"mac_dst\"] = mac_dst\n\n pkt = Ether()\n pkt.src = mac_src[0]\n pkt.dst = mac_dst[0]\n\n if ethernet_value:\n pkt.type = ethernet_value\n\n # add l3_protocol\n if l3_protocol == \"arp\":\n arp = ARP()\n arp.hwsrc = self.pop_str(kws, \"arp_src_hw_addr\", \"00:00:01:00:00:02\").replace(\".\", \":\")\n arp.hwdst = self.pop_str(kws, \"arp_dst_hw_addr\", \"00:00:00:00:00:00\").replace(\".\", \":\")\n arp.psrc = self.pop_str(kws, \"ip_src_addr\", \"0.0.0.0\")\n arp.pdst = self.pop_str(kws, \"ip_dst_addr\", \"192.0.0.1\")\n arp_oper = self.pop_str(kws, \"arp_operation\", \"arpRequest\")\n if arp_oper == \"arpRequest\":\n arp.op = 1\n elif arp_oper in [\"arpResponse\", \"arpReply\"]:\n arp.op = 2\n else:\n self.logger.debug(\"unknown ARP operation: {}\".format(arp_oper))\n arp = None\n if arp:\n pkt = self.check(pkt/arp)\n elif l3_protocol == \"ipv4\":\n ip = IP()\n #ip.id\n #ip.chksum\n ip.src = self.pop_str(kws, \"ip_src_addr\", \"0.0.0.0\")\n ip.dst = self.pop_str(kws, \"ip_dst_addr\", \"192.0.0.1\")\n ip.ttl = self.pop_int(kws, \"ip_ttl\", 255)\n #ip.frag\n #ip.len\n #ip.flags\n #ip.options\n proto = self.pop_int(kws, \"ip_proto\", -1)\n if proto >= 0: ip.proto = proto\n ip_dscp = self.pop_int(kws, \"ip_dscp\", 0)\n if ip_dscp:\n ip.tos = int(bin(ip_dscp) + \"00\", 2)\n else:\n ip.tos = ip.tos | (self.pop_int(kws, \"ip_precedence\", 0) << 5)\n ip.tos = ip.tos | 
(self.pop_int(kws, \"ip_delay\", 0) << 4)\n ip.tos = ip.tos | (self.pop_int(kws, \"ip_throughput\", 0) << 3)\n ip.tos = ip.tos | (self.pop_int(kws, \"ip_reliability\", 0) << 2)\n ip.tos = ip.tos | (self.pop_int(kws, \"ip_cost\", 0) << 1)\n ip.tos = ip.tos | (self.pop_int(kws, \"ip_reserved\", 0) << 0)\n pkt = self.check(pkt/ip)\n\n # add l4_protocol\n if l4_protocol in [\"udp\"]:\n udp = self.build_udp(kws)\n pkt = self.check(pkt/udp)\n elif l4_protocol in [\"tcp\"]:\n tcp = self.build_tcp(kws)\n pkt = self.check(pkt/tcp)\n elif l4_protocol in [\"icmp\"]:\n icmp = self.build_icmp(kws)\n pkt = self.check(pkt/icmp)\n elif l4_protocol in [\"igmp\"]:\n igmp = self.build_igmp(kws)\n if igmp:\n pkt = self.check(pkt/igmp)\n elif l4_protocol:\n self.logger.todo(\"unsupported-ipv4\", \"l4_protocol\", l4_protocol)\n elif l3_protocol == \"ipv6\":\n ip6 = IPv6()\n ip6.src = self.pop_str(kws, \"ipv6_src_addr\", \"fe80:0:0:0:0:0:0:12\")\n ip6.dst = self.pop_str(kws, \"ipv6_dst_addr\", \"fe80:0:0:0:0:0:0:22\")\n ip6.hlim = self.pop_int(kws, \"ipv6_hop_limit\", 255)\n ip6.tc = self.pop_int(kws, \"ipv6_traffic_class\", 255)\n nh = self.pop_int(kws, \"ipv6_next_header\", 0)\n if nh: ip6.nh = nh\n # add l4_protocol\n pkt = self.check(pkt/ip6)\n if l4_protocol in [\"udp\"]:\n udp = self.build_udp(kws)\n pkt = self.check(pkt/udp)\n elif l4_protocol in [\"tcp\"]:\n tcp = self.build_tcp(kws)\n pkt = self.check(pkt/tcp)\n elif l4_protocol in [\"icmp\"]:\n icmp = self.build_icmp6(kws)\n pkt = self.check(pkt/icmp)\n elif l4_protocol in [\"igmp\"]:\n igmp = self.build_igmp(kws)\n if igmp:\n pkt = self.check(pkt/igmp)\n elif l4_protocol:\n self.logger.todo(\"unsupported-ipv6\", \"l4_protocol\", l4_protocol)\n elif l3_protocol:\n self.logger.todo(\"unsupported\", \"l3_protocol\", l3_protocol)\n return None\n\n # insert VLAN header if required\n if l2_encap in [\"ethernet_ii_vlan\", \"ethernet_ii\"] and vlan_id > 0 and vlan_en == \"enable\":\n (payload, payload_type) = (pkt.payload, pkt.type)\n pkt.remove_payload()\n pkt.type = 0x8100\n pkt = self.check(pkt/Dot1Q(vlan=vlan_id, id=vlan_cfi, prio=vlan_prio, type=payload_type)/payload)\n #self.trace_packet(pkt)\n\n # handle transmit_mode\n if transmit_mode == \"continuous\":\n max_loops = 0\n elif transmit_mode == \"single_burst\":\n max_loops = pkts_per_burst\n if rate_pps < pkts_per_burst:\n rate_pps = pkts_per_burst\n else:\n self.logger.debug(\"TODO: transmit_mode={}\".format(transmit_mode))\n max_loops = 0\n\n # append the data pattern if specified\n if data_pattern:\n padding = Padding()\n tmp_pattern = ''.join(c for c in data_pattern if c not in ' ')\n tmp_pattern = binascii.unhexlify(tmp_pattern)\n padLen = int(frame_size - len(pkt) - 4 - len(padding))\n if len(tmp_pattern) > padLen:\n padding = Padding(tmp_pattern[:padLen])\n else:\n padding = Padding(tmp_pattern)\n pkt = self.check(pkt/padding)\n\n # update padding length based on frame_size\n if length_mode == \"fixed\":\n padLen = int(frame_size - len(pkt) - 4)\n if padLen > 0:\n padding = Padding(binascii.unhexlify('00' * padLen))\n pkt = self.check(pkt/padding)\n\n # verify unhandled options\n for key, value in kws.items():\n if key not in stale_list_ignore:\n self.logger.error(\"unhandled option {} = {}\".format(key, value))\n\n pwa = SpyTestDict()\n pwa.pkt = pkt\n pwa.left = max_loops\n pwa.transmit_mode = transmit_mode\n if rate_pps > self.max_rate_pps:\n self.logger.debug(\"drop the rate from {} to {}\".format(rate_pps, self.max_rate_pps))\n rate_pps = self.max_rate_pps\n pwa.rate_pps = rate_pps\n 
pwa.duration = duration\n pwa.stream = stream\n pwa.mac_src_count = 0\n pwa.mac_dst_count = 0\n pwa.arp_src_hw_count = 0\n pwa.arp_dst_hw_count = 0\n pwa.ip_src_count = 0\n pwa.ip_dst_count = 0\n pwa.ipv6_src_count = 0\n pwa.ipv6_dst_count = 0\n pwa.vlan_id_count = 0\n pwa.tcp_src_port_count = 0\n pwa.tcp_dst_port_count = 0\n pwa.udp_src_port_count = 0\n pwa.udp_dst_port_count = 0\n ##self.trace_packet(pkt)\n #self.logger.debug(pwa)\n\n pwa.length_mode = length_mode\n pwa.frame_size_current = frame_size_min\n pwa.frame_size_min = frame_size_min\n pwa.frame_size_max = frame_size_max\n pwa.frame_size_step = frame_size_step\n self.add_padding(pwa, True)\n\n return pwa\n\n def add_padding(self, pwa, first):\n pwa.padding = None\n if pwa.length_mode == \"random\":\n pktLen = len(pwa.pkt)\n frame_size = random.randrange(pwa.frame_size_min, pwa.frame_size_max+1)\n padLen = int(frame_size - pktLen - 4)\n if padLen > 0:\n pwa.padding = Padding(binascii.unhexlify('00' * padLen))\n elif pwa.length_mode in [\"increment\", \"incr\"]:\n pktLen = len(pwa.pkt)\n if first:\n frame_size = pwa.frame_size_min\n else:\n frame_size = pwa.frame_size_current + pwa.frame_size_step\n if frame_size > pwa.frame_size_max:\n pwa.frame_size_current = pwa.frame_size_min\n else:\n pwa.frame_size_current = frame_size\n padLen = int(pwa.frame_size_current - pktLen - 4)\n if padLen > 0:\n pwa.padding = Padding(binascii.unhexlify('00' * padLen))\n\n def build_next_dma(self, pwa):\n\n # Change Ether SRC MAC\n mac_src_mode = pwa.stream.kws.get(\"mac_src_mode\", \"fixed\").strip()\n mac_src_step = pwa.stream.kws.get(\"mac_src_step\", \"00:00:00:00:00:01\")\n mac_src_count = self.utils.intval(pwa.stream.kws, \"mac_src_count\", 0)\n if mac_src_mode in [\"increment\", \"decrement\"]:\n if mac_src_mode in [\"increment\"]:\n pwa.pkt[0].src = self.utils.incrementMac(pwa.pkt[0].src, mac_src_step)\n else:\n pwa.pkt[0].src = self.utils.decrementMac(pwa.pkt[0].src, mac_src_step)\n pwa.mac_src_count = pwa.mac_src_count + 1\n if mac_src_count > 0 and pwa.mac_src_count >= mac_src_count:\n pwa.pkt[0].src = pwa.stream.kws[\"mac_src\"][0]\n pwa.mac_src_count = 0\n elif mac_src_mode in [\"list\"]:\n pwa.mac_src_count = pwa.mac_src_count + 1\n if pwa.mac_src_count >= len(pwa.stream.kws[\"mac_src\"]):\n pwa.pkt[0].src = pwa.stream.kws[\"mac_src\"][0]\n pwa.mac_src_count = 0\n else:\n pwa.pkt[0].src = pwa.stream.kws[\"mac_src\"][pwa.mac_src_count]\n elif mac_src_mode != \"fixed\":\n self.logger.todo(\"unhandled\", \"mac_src_mode\", mac_src_mode)\n\n # Change Ether DST MAC\n mac_dst_mode = pwa.stream.kws.get(\"mac_dst_mode\", \"fixed\").strip()\n mac_dst_step = pwa.stream.kws.get(\"mac_dst_step\", \"00:00:00:00:00:01\")\n mac_dst_count = self.utils.intval(pwa.stream.kws, \"mac_dst_count\", 0)\n if mac_dst_mode in [\"increment\", \"decrement\"]:\n if mac_dst_mode in [\"increment\"]:\n pwa.pkt[0].dst = self.utils.incrementMac(pwa.pkt[0].dst, mac_dst_step)\n else:\n pwa.pkt[0].dst = self.utils.decrementMac(pwa.pkt[0].dst, mac_dst_step)\n pwa.mac_dst_count = pwa.mac_dst_count + 1\n if mac_dst_count > 0 and pwa.mac_dst_count >= mac_dst_count:\n pwa.pkt[0].dst = pwa.stream.kws[\"mac_dst\"][0]\n pwa.mac_dst_count = 0\n elif mac_dst_mode in [\"list\"]:\n pwa.mac_dst_count = pwa.mac_dst_count + 1\n if pwa.mac_dst_count >= len(pwa.stream.kws[\"mac_dst\"]):\n pwa.pkt[0].dst = pwa.stream.kws[\"mac_dst\"][0]\n pwa.mac_dst_count = 0\n else:\n pwa.pkt[0].dst = pwa.stream.kws[\"mac_dst\"][pwa.mac_dst_count]\n elif mac_dst_mode != \"fixed\":\n 
self.logger.todo(\"unhandled\", \"mac_dst_mode\", mac_dst_mode)\n\n # Change ARP SRC MAC\n if ARP in pwa.pkt:\n arp_src_hw_mode = pwa.stream.kws.get(\"arp_src_hw_mode\", \"fixed\").strip()\n arp_src_hw_step = pwa.stream.kws.get(\"arp_src_hw_step\", \"00:00:00:00:00:01\")\n arp_src_hw_count = self.utils.intval(pwa.stream.kws, \"arp_src_hw_count\", 0)\n if arp_src_hw_mode in [\"increment\", \"decrement\"]:\n if arp_src_hw_mode in [\"increment\"]:\n pwa.pkt[ARP].hwsrc = self.utils.incrementMac(pwa.pkt[ARP].hwsrc, arp_src_hw_step)\n else:\n pwa.pkt[ARP].hwsrc = self.utils.decrementMac(pwa.pkt[ARP].hwsrc, arp_src_hw_step)\n pwa.arp_src_hw_count = pwa.arp_src_hw_count + 1\n if arp_src_hw_count > 0 and pwa.arp_src_hw_count >= arp_src_hw_count:\n pwa.pkt[ARP].hwsrc = pwa.stream.kws.get(\"arp_src_hw_addr\", \"00:00:01:00:00:02\").replace(\".\", \":\")\n pwa.arp_src_hw_count = 0\n elif arp_src_hw_mode != \"fixed\":\n self.logger.todo(\"unhandled\", \"arp_src_hw_mode\", arp_src_hw_mode)\n\n # Change ARP DST MAC\n if ARP in pwa.pkt:\n arp_dst_hw_mode = pwa.stream.kws.get(\"arp_dst_hw_mode\", \"fixed\").strip()\n arp_dst_hw_step = pwa.stream.kws.get(\"arp_dst_hw_step\", \"00:00:00:00:00:01\")\n arp_dst_hw_count = self.utils.intval(pwa.stream.kws, \"arp_dst_hw_count\", 0)\n if arp_dst_hw_mode in [\"increment\", \"decrement\"]:\n if arp_dst_hw_mode in [\"increment\"]:\n pwa.pkt[ARP].hwdst = self.utils.incrementMac(pwa.pkt[ARP].hwdst, arp_dst_hw_step)\n else:\n pwa.pkt[ARP].hwdst = self.utils.decrementMac(pwa.pkt[ARP].hwdst, arp_dst_hw_step)\n pwa.arp_dst_hw_count = pwa.arp_dst_hw_count + 1\n if arp_dst_hw_count > 0 and pwa.arp_dst_hw_count >= arp_dst_hw_count:\n pwa.pkt[ARP].hwdst = pwa.stream.kws.get(\"arp_dst_hw_addr\", \"00:00:00:00:00:00\").replace(\".\", \":\")\n pwa.arp_dst_hw_count = 0\n elif arp_dst_hw_mode != \"fixed\":\n self.logger.todo(\"unhandled\", \"arp_dst_hw_mode\", arp_dst_hw_mode)\n\n # Change SRC IP\n if IP in pwa.pkt:\n ip_src_mode = pwa.stream.kws.get(\"ip_src_mode\", \"fixed\").strip()\n ip_src_step = pwa.stream.kws.get(\"ip_src_step\", \"0.0.0.1\")\n ip_src_count = self.utils.intval(pwa.stream.kws, \"ip_src_count\", 0)\n if ip_src_mode in [\"increment\", \"decrement\"]:\n if ip_src_mode in [\"increment\"]:\n pwa.pkt[IP].src = self.utils.incrementIPv4(pwa.pkt[IP].src, ip_src_step)\n else:\n pwa.pkt[IP].src = self.utils.decrementIPv4(pwa.pkt[IP].src, ip_src_step)\n pwa.ip_src_count = pwa.ip_src_count + 1\n if ip_src_count > 0 and pwa.ip_src_count >= ip_src_count:\n pwa.pkt[IP].src = pwa.stream.kws.get(\"ip_src_addr\", \"0.0.0.0\")\n pwa.ip_src_count = 0\n elif ip_src_mode != \"fixed\":\n self.logger.todo(\"unhandled\", \"ip_src_mode\", ip_src_mode)\n\n # Change DST IP\n if IP in pwa.pkt:\n ip_dst_mode = pwa.stream.kws.get(\"ip_dst_mode\", \"fixed\").strip()\n ip_dst_step = pwa.stream.kws.get(\"ip_dst_step\", \"0.0.0.1\")\n ip_dst_count = self.utils.intval(pwa.stream.kws, \"ip_dst_count\", 0)\n if ip_dst_mode in [\"increment\", \"decrement\"]:\n if ip_dst_mode in [\"increment\"]:\n pwa.pkt[IP].dst = self.utils.incrementIPv4(pwa.pkt[IP].dst, ip_dst_step)\n else:\n pwa.pkt[IP].dst = self.utils.decrementIPv4(pwa.pkt[IP].dst, ip_dst_step)\n pwa.ip_dst_count = pwa.ip_dst_count + 1\n if ip_dst_count > 0 and pwa.ip_dst_count >= ip_dst_count:\n pwa.pkt[IP].dst = pwa.stream.kws.get(\"ip_dst_addr\", \"192.0.0.1\")\n pwa.ip_dst_count = 0\n elif ip_dst_mode != \"fixed\":\n self.logger.todo(\"unhandled\", \"ip_dst_mode\", ip_dst_mode)\n\n # Change SRC IPv6\n if IPv6 in pwa.pkt:\n ipv6_src_mode = 
pwa.stream.kws.get(\"ipv6_src_mode\", \"fixed\").strip()\n ipv6_src_step = pwa.stream.kws.get(\"ipv6_src_step\", \"::1\")\n ipv6_src_count = self.utils.intval(pwa.stream.kws, \"ipv6_src_count\", 0)\n if ipv6_src_mode in [\"increment\", \"decrement\"]:\n if ipv6_src_mode in [\"increment\"]:\n pwa.pkt[IPv6].src = self.utils.incrementIPv6(pwa.pkt[IPv6].src, ipv6_src_step)\n else:\n pwa.pkt[IPv6].src = self.utils.decrementIPv6(pwa.pkt[IPv6].src, ipv6_src_step)\n pwa.ipv6_src_count = pwa.ipv6_src_count + 1\n if ipv6_src_count > 0 and pwa.ipv6_src_count >= ipv6_src_count:\n pwa.pkt[IPv6].src = pwa.stream.kws.get(\"ipv6_src_addr\", \"fe80:0:0:0:0:0:0:12\")\n pwa.ipv6_src_count = 0\n elif ipv6_src_mode != \"fixed\":\n self.logger.todo(\"unhandled\", \"ipv6_src_mode\", ipv6_src_mode)\n\n # Change DST IPv6\n if IPv6 in pwa.pkt:\n ipv6_dst_mode = pwa.stream.kws.get(\"ipv6_dst_mode\", \"fixed\").strip()\n ipv6_dst_step = pwa.stream.kws.get(\"ipv6_dst_step\", \"::1\")\n ipv6_dst_count = self.utils.intval(pwa.stream.kws, \"ipv6_dst_count\", 0)\n if ipv6_dst_mode in [\"increment\", \"decrement\"]:\n if ipv6_dst_mode in [\"increment\"]:\n pwa.pkt[IPv6].dst = self.utils.incrementIPv6(pwa.pkt[IPv6].dst, ipv6_dst_step)\n else:\n pwa.pkt[IPv6].dst = self.utils.decrementIPv6(pwa.pkt[IPv6].dst, ipv6_dst_step)\n pwa.ipv6_dst_count = pwa.ipv6_dst_count + 1\n if ipv6_dst_count > 0 and pwa.ipv6_dst_count >= ipv6_dst_count:\n pwa.pkt[IPv6].dst = pwa.stream.kws.get(\"ipv6_dst_addr\", \"fe80:0:0:0:0:0:0:22\")\n pwa.ipv6_dst_count = 0\n elif ipv6_dst_mode != \"fixed\":\n self.logger.todo(\"unhandled\", \"ipv6_dst_mode\", ipv6_dst_mode)\n\n # Change VLAN\n if Dot1Q in pwa.pkt:\n vlan_id_mode = pwa.stream.kws.get(\"vlan_id_mode\", \"fixed\").strip()\n vlan_id_step = self.utils.intval(pwa.stream.kws, \"vlan_id_step\", 1)\n vlan_id_count = self.utils.intval(pwa.stream.kws, \"vlan_id_count\", 0)\n if vlan_id_mode in [\"increment\", \"decrement\"]:\n if vlan_id_mode in [\"increment\"]:\n pwa.pkt[Dot1Q].vlan = pwa.pkt[Dot1Q].vlan + vlan_id_step\n else:\n pwa.pkt[Dot1Q].vlan = pwa.pkt[Dot1Q].vlan - vlan_id_step\n pwa.vlan_id_count = pwa.vlan_id_count + 1\n if vlan_id_count > 0 and pwa.vlan_id_count >= vlan_id_count:\n pwa.pkt[Dot1Q].vlan = self.utils.intval(pwa.stream.kws, \"vlan_id\", 0)\n pwa.vlan_id_count = 0\n elif vlan_id_mode != \"fixed\":\n self.logger.todo(\"unhandled\", \"vlan_id_mode\", vlan_id_mode)\n\n # Change TCP SRC PORT\n if TCP in pwa.pkt:\n tcp_src_port_mode = pwa.stream.kws.get(\"tcp_src_port_mode\", \"fixed\").strip()\n tcp_src_port_step = self.utils.intval(pwa.stream.kws, \"tcp_src_port_step\", 1)\n tcp_src_port_count = self.utils.intval(pwa.stream.kws, \"tcp_src_port_count\", 0)\n if tcp_src_port_mode in [\"increment\", \"decrement\", \"incr\", \"decr\"]:\n if tcp_src_port_mode in [\"increment\", \"incr\"]:\n pwa.pkt[TCP].sport = pwa.pkt[TCP].sport + tcp_src_port_step\n else:\n pwa.pkt[TCP].sport = pwa.pkt[TCP].sport - tcp_src_port_step\n pwa.tcp_src_port_count = pwa.tcp_src_port_count + 1\n if tcp_src_port_count > 0 and pwa.tcp_src_port_count >= tcp_src_port_count:\n pwa.pkt[TCP].sport = self.utils.intval(pwa.stream.kws, \"tcp_src_port\", 0)\n pwa.tcp_src_port_count = 0\n elif tcp_src_port_mode != \"fixed\":\n self.logger.todo(\"unhandled\", \"tcp_src_port_mode\", tcp_src_port_mode)\n\n # Change TCP DST PORT\n if TCP in pwa.pkt:\n tcp_dst_port_mode = pwa.stream.kws.get(\"tcp_dst_port_mode\", \"fixed\").strip()\n tcp_dst_port_step = self.utils.intval(pwa.stream.kws, \"tcp_dst_port_step\", 1)\n 
tcp_dst_port_count = self.utils.intval(pwa.stream.kws, \"tcp_dst_port_count\", 0)\n            if tcp_dst_port_mode in [\"increment\", \"decrement\", \"incr\", \"decr\"]:\n                if tcp_dst_port_mode in [\"increment\", \"incr\"]:\n                    pwa.pkt[TCP].dport = pwa.pkt[TCP].dport + tcp_dst_port_step\n                else:\n                    pwa.pkt[TCP].dport = pwa.pkt[TCP].dport - tcp_dst_port_step\n                pwa.tcp_dst_port_count = pwa.tcp_dst_port_count + 1\n                if tcp_dst_port_count > 0 and pwa.tcp_dst_port_count >= tcp_dst_port_count:\n                    pwa.pkt[TCP].dport = self.utils.intval(pwa.stream.kws, \"tcp_dst_port\", 0)\n                    pwa.tcp_dst_port_count = 0\n            elif tcp_dst_port_mode != \"fixed\":\n                self.logger.todo(\"unhandled\", \"tcp_dst_port_mode\", tcp_dst_port_mode)\n\n        # Change UDP SRC PORT\n        if UDP in pwa.pkt:\n            udp_src_port_mode = pwa.stream.kws.get(\"udp_src_port_mode\", \"fixed\").strip()\n            udp_src_port_step = self.utils.intval(pwa.stream.kws, \"udp_src_port_step\", 1)\n            udp_src_port_count = self.utils.intval(pwa.stream.kws, \"udp_src_port_count\", 0)\n            if udp_src_port_mode in [\"increment\", \"decrement\", \"incr\", \"decr\"]:\n                if udp_src_port_mode in [\"increment\", \"incr\"]:\n                    pwa.pkt[UDP].sport = pwa.pkt[UDP].sport + udp_src_port_step\n                else:\n                    pwa.pkt[UDP].sport = pwa.pkt[UDP].sport - udp_src_port_step\n                pwa.udp_src_port_count = pwa.udp_src_port_count + 1\n                if udp_src_port_count > 0 and pwa.udp_src_port_count >= udp_src_port_count:\n                    pwa.pkt[UDP].sport = self.utils.intval(pwa.stream.kws, \"udp_src_port\", 0)\n                    pwa.udp_src_port_count = 0\n            elif udp_src_port_mode != \"fixed\":\n                self.logger.todo(\"unhandled\", \"udp_src_port_mode\", udp_src_port_mode)\n\n        # Change UDP DST PORT\n        if UDP in pwa.pkt:\n            udp_dst_port_mode = pwa.stream.kws.get(\"udp_dst_port_mode\", \"fixed\").strip()\n            udp_dst_port_step = self.utils.intval(pwa.stream.kws, \"udp_dst_port_step\", 1)\n            udp_dst_port_count = self.utils.intval(pwa.stream.kws, \"udp_dst_port_count\", 0)\n            if udp_dst_port_mode in [\"increment\", \"decrement\", \"incr\", \"decr\"]:\n                if udp_dst_port_mode in [\"increment\", \"incr\"]:\n                    pwa.pkt[UDP].dport = pwa.pkt[UDP].dport + udp_dst_port_step\n                else:\n                    pwa.pkt[UDP].dport = pwa.pkt[UDP].dport - udp_dst_port_step\n                pwa.udp_dst_port_count = pwa.udp_dst_port_count + 1\n                if udp_dst_port_count > 0 and pwa.udp_dst_port_count >= udp_dst_port_count:\n                    pwa.pkt[UDP].dport = self.utils.intval(pwa.stream.kws, \"udp_dst_port\", 0)\n                    pwa.udp_dst_port_count = 0\n            elif udp_dst_port_mode != \"fixed\":\n                self.logger.todo(\"unhandled\", \"udp_dst_port_mode\", udp_dst_port_mode)\n\n        # add padding based on length_mode\n        self.add_padding(pwa, False)\n\n        return pwa\n\n    def build_next(self, pwa):\n        if self.dbg > 1:\n            self.logger.debug(\"build_next transmit_mode={} left={}\".format(pwa.transmit_mode, pwa.left))\n\n        if pwa.transmit_mode in [\"continuous\", \"continuous_burst\"] or pwa.left > 1:\n            pwa = self.build_next_dma(pwa)\n            if not pwa: return None\n\n        if pwa.transmit_mode in [\"continuous\", \"continuous_burst\"]:\n            return pwa\n        elif pwa.left > 1:\n            pwa.left = pwa.left - 1\n            return pwa\n        elif pwa.transmit_mode in [\"single_burst\"]:\n            return None\n        else:\n            self.logger.debug(\"TODO: transmit_mode = {}\".format(pwa.transmit_mode))\n            return None\n\n    def match_stream(self, stream, pkt):\n        rx_str = hexlify(bytes(str(pkt)))\n        #print(\"CMP: STREAM: {}\".format(stream.kws))\n        #print(\"CMP: TRACK_RX: {}\".format(rx_str))\n        for track_pkt in stream.track_pkts:\n            tx_str = hexlify(track_pkt)\n            #print(\"CMP: TRACK_TX: {}\".format(tx_str))\n            if len(tx_str) <= len(rx_str):\n                if tx_str == 
rx_str[:len(tx_str)]:\n return True\n return False\n #if pkt.src != stream.kws.get(\"mac_src\"): return False\n #if pkt.dst != stream.kws.get(\"mac_dst\"): return False\n\n #if IPv6 in pkt and stream.kws.get(\"l3_protocol\") != \"ipv6\": return False\n #if IP in pkt and stream.kws.get(\"l3_protocol\") != \"ipv4\": return False\n\n #if IP in pkt:\n #if pkt[IP].src != stream.kws.get(\"ip_src_addr\"): return False\n #if pkt[IP].dst != stream.kws.get(\"ip_dst_addr\"): return False\n #if TCP in pkt:\n #if pkt[TCP].sport != self.get_int(stream.kws, \"tcp_src_port\", 0): return False\n #if pkt[TCP].dport != self.get_int(stream.kws, \"tcp_dst_port\", 0): return False\n #if UDP in pkt:\n #if pkt[UDP].sport != self.get_int(stream.kws, \"udp_src_port\", 0): return False\n #if pkt[UDP].dport != self.get_int(stream.kws, \"udp_dst_port\", 0): return False\n #return True\n\n def if_create_one(self, index, intf, ip4addr, ip4gw, ip6addr, ip6gw, smac, **kws):\n ns = \"{}_{}\".format(intf.name, index)\n\n vlan_enable = self.utils.intval(kws, \"vlan\", 0)\n vlan_id = self.utils.intval(kws, \"vlan_id\", 1)\n veth_name = \"veth0\" if vlan_enable else \"veth1\"\n\n #self.logger.debug(\"vlan_enable={} vlan_id={}\".format(vlan_enable, vlan_id))\n\n # remove existing\n self.if_delete_one(index, intf)\n\n # create the linux interface\n cmds = textwrap.dedent(\"\"\"\n ip netns add ns_{0}\n ip link add veth_{0} type veth peer name {2}\n ip link set {2} netns ns_{0}\n ip netns exec ns_{0} ethtool --offload {2} rx off tx off > /dev/null 2>&1\n ip netns exec ns_{0} ip link set dev {2} up\n ip netns exec ns_{0} ethtool --offload {2} rx off tx off\n ip link set dev veth_{0} up\n #brctl addif {1}-br veth_{0}\n ip link set dev veth_{0} master {1}-br\n \"\"\".format(ns, intf.iface, veth_name))\n\n if vlan_enable:\n cmds += textwrap.dedent(\"\"\"\n ip netns exec ns_{0} ip link add link veth0 name veth1 type vlan id {1}\n ip netns exec ns_{0} ip link set veth1 up\n \"\"\".format(ns, vlan_id))\n\n # set interface mac address\n if smac != \"00:00:00:00:00:00\":\n cmds += \"\\nip netns exec ns_{0} ip link set veth1 address {1}\".format(ns, smac)\n\n # assign IPv4 to linux interface\n if ip4addr:\n cmds += \"\\nip netns exec ns_{0} ip addr add {1}/{2} dev veth1\".format(ns, ip4addr, 24)\n if ip4gw:\n cmds += \"\\nip netns exec ns_{0} ip route add default via {1}\".format(ns, ip4gw)\n\n # assign IPv6 to linux interface\n if ip6addr:\n ip6prefix = self.utils.intval(intf.kws, \"ipv6_prefix_length\", 64)\n cmds += \"\\nip netns exec ns_{0} ip -6 addr add {1}/{2} dev veth1\".format(ns, ip6addr, ip6prefix)\n if ip6gw:\n cmds += \"\\nip netns exec ns_{0} ip -6 route add default via {1}\".format(ns, ip6gw)\n\n # send Arp request\n arp_send_req = self.utils.intval(kws, \"arp_send_req\", 1)\n if arp_send_req:\n if ip4gw:\n cmds += textwrap.dedent(\"\"\"\n ip netns exec ns_{0} arping -c 1 -I veth1 {1}\n ip netns exec ns_{0} arp -n\n \"\"\".format(ns, ip4gw))\n\n if self.dbg > 1:\n cmds += textwrap.dedent(\"\"\"\n ip netns exec ns_{0} ifconfig veth1\n ip netns exec ns_{0} ip addr ls veth1\n \"\"\".format(ns))\n\n # execute the commands\n self.utils.shexec(cmds)\n\n def if_create(self, intf):\n smac = intf.kws.get(\"src_mac_addr\", \"00:00:00:00:00:00\").replace(\".\", \":\")\n count = self.utils.intval(intf.kws, \"count\", 1)\n\n # set IPv4 Address\n ip4addr = intf.kws.get(\"intf_ip_addr\", \"\")\n ip4addr_step = intf.kws.get(\"intf_ip_addr_step\", \"0.0.0.1\")\n ip4gw = intf.kws.get(\"gateway\", \"\")\n ip4gw_step = 
intf.kws.get(\"gateway_step\", \"0.0.0.0\")\n if ip4addr:\n for index in range(count):\n self.if_create_one(index, intf, ip4addr, ip4gw, None, None, smac, **intf.kws)\n ip4addr = self.utils.incrementIPv4(ip4addr, ip4addr_step)\n ip4gw = self.utils.incrementIPv4(ip4gw, ip4gw_step)\n if smac != \"00:00:00:00:00:00\":\n smac = self.utils.incrementMac(smac, \"00:00:00:00:00:01\")\n # set IPv6 Address\n ip6addr = intf.kws.get(\"ipv6_intf_addr\", \"\")\n ip6gw = intf.kws.get(\"ipv6_gateway\", \"\")\n if ip6addr:\n self.if_create_one(0, intf, None, None, ip6addr, ip6gw, smac, **intf.kws)\n\n def if_delete_one(self, index, intf):\n ns = \"{}_{}\".format(intf.name, index)\n\n # remove the linux interface\n cmds = textwrap.dedent(\"\"\"\n ip netns del ns_{0}\n ip link del veth_{0}\n \"\"\".format(ns))\n self.utils.shexec(cmds)\n\n def if_delete(self, intf):\n count = self.utils.intval(intf.kws, \"count\", 1)\n for index in range(count):\n self.if_delete_one(index, intf)\n\n def get_my_mac(self, intf, default=\"00:00:00:00:00:00\"):\n ns = \"{}_{}\".format(intf.name, 0)\n cmd = \"ip netns exec ns_{0} cat /sys/class/net/veth1/address\".format(ns)\n output = self.utils.cmdexec(cmd).lower()\n self.logger.debug(\"{} = {}\".format(cmd, output))\n return output\n\n def get_arp_mac(self, intf, default=\"00:00:00:00:00:00\"):\n ipv6gw = intf.kws.get(\"ipv6_gateway\", \"\")\n ipv4gw = intf.kws.get(\"gateway\", \"0.0.0.0\")\n ns = \"{}_{}\".format(intf.name, 0)\n self.logger.debug(\"get_arp_mac {} {}\".format(ipv4gw, ipv6gw))\n\n # try getting from ARP cache\n try:\n cmd = \"ip netns exec ns_{0} cat /proc/net/arp\".format(ns)\n output = self.utils.cmdexec(cmd).lower()\n self.logger.debug(\"{} = \\n {}\".format(cmd, output))\n\n cmd = \"ip netns exec ns_{0} arp -n {1}\".format(ns, ipv4gw)\n output = self.utils.cmdexec(cmd).lower()\n self.logger.debug(\"{} = \\n {}\".format(cmd, output))\n\n return re.search(r\"(([a-f\\d]{1,2}\\:){5}[a-f\\d]{1,2})\", output).groups()[0]\n except Exception as exp:\n self.logger.debug(\"Fail-1: get_arp_mac {} {} {}\".format(ipv4gw, ipv6gw, exp))\n\n # try getting from arping output\n try:\n cmd = \"ip netns exec ns_{0} arping -c 1 -I veth1 {1}\".format(ns, ipv4gw)\n output = self.utils.cmdexec(cmd).lower()\n self.logger.debug(\"{} = \\n {}\".format(cmd, output))\n return re.search(r\"(([a-f\\d]{1,2}\\:){5}[a-f\\d]{1,2})\", output).groups()[0]\n except Exception as exp:\n self.logger.debug(\"Fail-2: get_arp_mac {} {} {}\".format(ipv4gw, ipv6gw, exp))\n\n return default\n\n def ping(self, intf, ping_dst, index=0):\n ns = \"{}_{}\".format(intf.name, index)\n\n cmd = \"ip netns exec ns_{0} ping -c 1 {1}\".format(ns, ping_dst)\n try:\n ip = ipaddress.ip_address(unicode(ping_dst))\n if ip._version == 6:\n cmd = \"ip netns exec ns_{0} ping6 -c 1 {1}\".format(ns, ping_dst)\n except Exception as exp:\n self.logger.error(exp)\n\n # execute the command\n return self.utils.cmdexec(cmd)\n\n def send_arp(self, intf, index=0):\n ns = \"{}_{}\".format(intf.name, index)\n ip4gw = intf.kws.get(\"gateway\", \"0.0.0.0\")\n cmd = \"ip netns exec ns_{0} arping -c 1 -I veth1 {1}\".format(ns, ip4gw)\n return self.utils.cmdexec(cmd)\n\n def config_yabgp_one(self, enable, intf, index):\n ns = \"{}_{}\".format(intf.name, index)\n intf_ip_addr = intf.kws.get(\"intf_ip_addr\", \"\")\n #ipv6_intf_addr = intf.kws.get(\"ipv6_intf_addr\", \"\")\n remote_ip_addr = intf.bgp_kws.get(\"remote_ip_addr\", \"0.0.0.0\")\n remote_as = self.utils.intval(intf.bgp_kws, \"remote_as\", 65001)\n local_as = 
self.utils.intval(intf.bgp_kws, \"local_as\", 65007)\n cwd = os.getcwd()\n logfile = \"{}/logs/current/bgpd_{}.log\".format(cwd, ns)\n pidfile = \"{}/logs/current/bgpd_{}.pid\".format(cwd, ns)\n if not enable:\n cmd = \"pkill -F {}\".format(pidfile)\n return self.utils.cmdexec(cmd)\n cmds = textwrap.dedent(\"\"\"\n ip netns exec ns_{0} \\\n yabgpd --bgp-afi_safi=ipv4 \\\n --bgp-local_addr={1} --bgp-local_as={2} \\\n --bgp-remote_addr={3} --bgp-remote_as={4} \\\n --rest-bind_host 127.0.0.1 --rest-bind_port 5555 \\\n --nouse-stderr --log-file={5} \\\n & echo $! > {6}\n \"\"\".format(ns, intf_ip_addr, local_as, remote_ip_addr, remote_as, logfile, pidfile))\n self.utils.shexec(cmds)\n\n def config_yabgp(self, enable, intf):\n count = self.utils.intval(intf.kws, \"count\", 1)\n for index in range(count):\n self.config_yabgp_one(enable, intf, index)\n\n def config_exabgp_one(self, enable, intf, index=0):\n ns = \"{}_{}\".format(intf.name, index)\n\n cwd = os.getcwd()\n logfile = \"{}/logs/current/exabgpd_{}.log\".format(cwd, ns)\n pidfile = \"{}/logs/current/exabgpd_{}.pid\".format(cwd, ns)\n envfile = \"{}/logs/current/exabgpd_{}.env\".format(cwd, ns)\n cfgfile = \"{}/logs/current/exabgpd_{}.cfg\".format(cwd, ns)\n if not enable:\n cmd = \"pkill -F {}\".format(pidfile)\n self.utils.cmdexec(cmd)\n return True\n\n intf_ip_addr = intf.kws.get(\"intf_ip_addr\", \"\")\n ipv6_intf_addr = intf.kws.get(\"ipv6_intf_addr\", \"\")\n if ipv6_intf_addr: intf_ip_addr = ipv6_intf_addr\n\n remote_ip_addr = intf.bgp_kws.get(\"remote_ip_addr\", \"0.0.0.0\")\n remote_ipv6_addr = intf.bgp_kws.get(\"remote_ipv6_addr\", \"\")\n if remote_ipv6_addr: remote_ip_addr = remote_ipv6_addr\n\n remote_as = self.utils.intval(intf.bgp_kws, \"remote_as\", 65001)\n local_as = self.utils.intval(intf.bgp_kws, \"local_as\", 65007)\n ip_version = self.utils.intval(intf.bgp_kws, \"ip_version\", 4)\n\n cmds = textwrap.dedent(\"\"\"\n neighbor {3} {{\n router-id {1};\n local-address {1};\n local-as {2};\n peer-as {4};\n }}\n \"\"\".format(ns, intf_ip_addr, local_as, remote_ip_addr, remote_as))\n self.utils.fwrite(cmds, cfgfile)\n\n cmds = textwrap.dedent(\"\"\"\n [exabgp.api]\n pipename = '{0}'\n\n [exabgp.daemon]\n pid = '{1}'\n daemonize = true\n drop = false\n\n [exabgp.log]\n all = true\n destination = '{2}'\n \"\"\".format(ns, pidfile, logfile))\n self.utils.fwrite(cmds, envfile)\n\n cmds = textwrap.dedent(\"\"\"\n cat {1} {2}\n mkfifo //run/{0}.{{in,out}}\n chmod 600 //run/{0}.{{in,out}}\n exabgp --env {1} {2}\n \"\"\".format(ns, envfile, cfgfile))\n sh_file = self.utils.fwrite(cmds)\n\n cmds = textwrap.dedent(\"\"\"\n ip netns exec ns_{0} bash {1}\n \"\"\".format(ns, sh_file))\n self.utils.shexec(cmds)\n\n return True\n\n def control_exabgp(self, intf):\n num_routes = self.utils.intval(intf.bgp_kws, \"num_routes\", 0)\n ns = \"{}_{}\".format(intf.name, 0)\n prefix = intf.bgp_kws.get(\"prefix\", \"\")\n if not prefix:\n msg = \"Prefix not specified num_routes={}\".format(num_routes)\n self.logger.error(msg)\n #return False\n for index in range(num_routes):\n cwd = os.getcwd()\n envfile = \"{}/logs/current/exabgpd_{}.env\".format(cwd, ns)\n remote_ipv6_addr = intf.bgp_kws.get(\"remote_ipv6_addr\", \"\")\n if remote_ipv6_addr:\n cmd = textwrap.dedent(\"\"\"\n ip netns exec ns_{0} exabgpcli --env {1} \\\n announce route {2}/128 next-hop self\n \"\"\".format(ns, envfile, prefix))\n prefix = self.utils.incrementIPv6(prefix, \"0:0:0:1::\")\n else:\n cmd = textwrap.dedent(\"\"\"\n ip netns exec ns_{0} exabgpcli --env {1} \\\n announce 
route {2}/24 next-hop self\n \"\"\".format(ns, envfile, prefix))\n prefix = self.utils.incrementIPv4(prefix, \"0.0.1.0\")\n output = self.utils.cmdexec(cmd)\n if \"could not send command to ExaBGP\" in output:\n self.logger.error(output)\n return False\n return True\n\n def config_exabgp(self, enable, intf):\n retval = self.config_exabgp_one(enable, intf)\n if retval and enable:\n retval = self.control_exabgp(intf)\n return retval\n\n def config_bgp(self, enable, intf):\n return self.config_exabgp(enable, intf)\n #self.config_yabgp(enable, intf)\n\n def config_igmp(self, mode, intf, host):\n ns = \"{}_{}\".format(intf.name, 0)\n igmp_version = host.get(\"igmp_version\", \"v3\")\n num_groups = self.utils.intval(host, \"grp_num_groups\", 1)\n ip4_addr = host.get(\"grp_ip_addr_start\", \"224.0.0.1\")\n ip4_step = \"0.0.1.0\"\n\n self.logger.error(\"TODO: config_igmp {} = {} {}\".format(mode, intf.iface, host))\n\n # force IGMP version\n if igmp_version == \"v2\":\n cmd = \"ip netns exec ns_{0} echo 2 > /proc/sys/net/ipv4/conf/veth1/force_igmp_version\".format(ns)\n else:\n cmd = \"ip netns exec ns_{0} echo 3 > /proc/sys/net/ipv4/conf/veth1/force_igmp_version\".format(ns)\n self.utils.cmdexec(cmd)\n\n # add ip addresses\n for i in range(num_groups):\n if mode in [\"start\", \"join\"]:\n cmd = \"ip netns exec ns_{0} ip addr add {1}/32 dev veth1 autojoin\".format(ns, ip4_addr)\n else:\n cmd = \"ip netns exec ns_{0} ip addr del {1}/32 dev veth1\".format(ns, ip4_addr)\n self.utils.cmdexec(cmd)\n ip4_addr = self.utils.incrementIPv4(ip4_addr, ip4_step)\n\n\nif __name__ == '__main__':\n from port import ScapyStream\n from ut_streams import ut_stream_get\n\n Logger.setup()\n iface = sys.argv[1] if len(sys.argv) > 1 else None\n packet = ScapyPacket(iface, 3, bool(not iface), False)\n\n kwargs = ut_stream_get(0)\n kwargs = ut_stream_get(0, mac_dst_mode='list', mac_dst=[\"00.00.00.00.00.02\", \"00.00.00.00.00.04\", \"00.00.00.00.00.06\"])\n kwargs = ut_stream_get(0, mac_src_mode='list', mac_src=\"00.00.00.00.00.02 00.00.00.00.00.04 00.00.00.00.00.06\")\n s = ScapyStream(0, None, None, **kwargs)\n pwa = packet.build_first(s)\n while pwa:\n packet.logger.info(\"=======================================================\")\n packet.logger.info(kwargs)\n packet.logger.info(\"=======================================================\")\n # wait to proceed\n #if iface: raw_input(\"press any key to send packet\")\n packet.send_packet(pwa, iface)\n pwa = packet.build_next(pwa)\n if pwa: time.sleep(1)\n\n","repo_name":"ANISH-GOTTAPU/sonic-mgmt-anish","sub_path":"spytest/spytest/tgen/scapy/packet.py","file_name":"packet.py","file_ext":"py","file_size_in_byte":56469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"808392913","text":"from functools import reduce\nfrom console import console\nfrom datetime import datetime\nfrom handle_date import Date\nfrom handle_cvs import Sold, Inventory\n\n\nday = Date.get_date()\n\n\ndef handleSell(parsed_Data, csv_path):\n name = parsed_Data.name.lower()\n price = parsed_Data.price\n amount = parsed_Data.amount\n sold = 0\n\n # sort on expiration_date\n Inventory.sortOnDate(\"inventory\")\n\n # Go through the inventory and get the product_name\n inStockTotal = Inventory.getAllItemsByName(name)\n\n # select not expired\n inStockTotalNotExpiredByName = []\n for dict in inStockTotal:\n expiration_date = datetime.strptime(\n dict['expiration_date'], \"%Y-%m-%d\")\n check_date = datetime.strptime(\n day, \"%Y-%m-%d\")\n if 
expiration_date < check_date:\n            pass\n        else:\n            inStockTotalNotExpiredByName.append(dict)\n\n    # Check how much of the item is in stock.\n    inStock = inStockTotalNotExpiredByName\n\n    inStockAmount = reduce(\n        lambda x, y: x + y, [d[\"amount\"] for d in inStock], 0)\n\n    # Loop that handles the selling of the products.\n    while amount > 0:\n        if inStock:\n            for stock in inStock:\n                if amount > inStockAmount:\n                    console.print(\n                        f' [red bold reverse] ERROR: You can only sell {inStockAmount} {name}'\n                    )\n                    amount = 0\n                    break\n                elif amount > stock[\"amount\"] and inStockAmount != 0:\n                    if amount == stock[\"amount\"]:\n                        console.print(\"[green bold reverse]OK\")\n                    amount -= stock[\"amount\"]\n                    inStockAmount -= stock[\"amount\"]\n                    sold += stock[\"amount\"]\n                    Sold.appendToCsv(\n                        stock[\"id\"], name, stock[\"amount\"], day, price)\n                    Inventory.removeFromCsv(int(stock[\"id\"]), csv_path)\n                    continue\n\n                else:\n                    Sold.appendToCsv(\n                        stock[\"id\"], name, amount, day, price)\n                    if amount == stock[\"amount\"]:\n                        Inventory.removeFromCsv(int(stock[\"id\"]), csv_path)\n                    else:\n                        Inventory.adjustCsv(int(stock[\"id\"]), amount, csv_path)\n                    # Count the remaining amount as sold, then set amount to 0 to end the loop\n                    sold += amount\n                    amount = 0\n                    console.print(\"[green bold reverse]OK\")\n                    break\n        else:\n            console.print(\"[red bold reverse]ERROR: Product not in stock.\")\n            break\n","repo_name":"BADijkman/Super_Py","sub_path":"handle_sell.py","file_name":"handle_sell.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10710428172","text":"import queue\nfrom threading import Thread\nimport time\nimport random\n\nnum = 0\n\ndef product(tasks):\n    global num\n    while True:\n        time.sleep(random.randint(1,3))\n        num += 1\n        tasks.put(num)\n        print('product num {}'.format(num))\n\n\n\ndef consume(tasks):\n    while True:\n        time.sleep(random.randint(3,6))\n        consume_num = tasks.get()\n        print('consume num {}'.format(consume_num))\n\n\ndef main():\n    tasks = queue.Queue()\n    t1 = Thread(target=product,args=(tasks,))\n    t2 = Thread(target=consume, args=(tasks,))\n    t1.start()\n    t2.start()\n    t1.join()\n    t2.join()\n\n\nif __name__=='__main__':\n    main()","repo_name":"wyu0430/wangyu","sub_path":"多线程基础练习/queuestudy.py","file_name":"queuestudy.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7858543966","text":"import torch\nimport torchtext\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nglove = torchtext.vocab.GloVe(name=\"6B\", # trained on Wikipedia 2014 corpus\n                              dim=100) # embedding size = 100\n\ndef print_closest_words(vec, n=5):\n    dists = torch.norm(glove.vectors - vec, dim=1) # compute distances to all words\n    lst = sorted(enumerate(dists.numpy()), key=lambda x: x[1]) # sort by distance\n    for idx, difference in lst[1:n+1]: # take the top n\n        print(glove.itos[idx], \"\\t%5.2f\" % difference)\n\ndef print_closest_cosine_words(vec, n=5):\n    # Compute the cosine similarity between the input vector and all the words\n    similarities = cosine_similarity(vec.reshape(1, -1), glove.vectors)\n    \n    # find the n most similar words\n    top_indices = np.argsort(similarities[0])[::-1][:n]\n\n    # print the most similar words and their similarities\n    for idx in top_indices:\n        similarity = similarities[0][idx]\n        print(glove.itos[idx], \"\\t%5.2f\" % similarity)\n\ndef compare_word_similarities(word):\n    print(f\"compare '{word}' word similar to:\")\n    print(\"\\ncosine: 
\\nword\\tcosine similarity\")\n print_closest_cosine_words(glove[word], n=10)\n \n print(\"\\nEuclidean: \\nword\\tEuclidean distance\")\n print_closest_words(glove[word], n=10)\n\nif __name__ == '__main__':\n compare_word_similarities('dog')\n print(\"------------------------------------\")\n compare_word_similarities('computer')","repo_name":"BrianQJN/Natrual_Language_Processing_Project","sub_path":"A1/Submission/A1P1_2.py","file_name":"A1P1_2.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34272353285","text":"# -*- coding: UTF-8 -*-\n\"\"\"\nDisplay data for steps of analysis in Section 4.\nHeikal Badrulhisham , 2019\n\"\"\"\nimport csv\nimport os\nimport math\nimport numpy\nfrom collections import defaultdict\nfrom scipy import stats\n\n\ndef freq_filter(rows):\n return [r for r in rows if float(r[12]) >= 100 and float(r[13]) >= 100]\n\n\ndef rr_ranges():\n \"\"\"\n Display risk ratio by ranges for Table 4.1-1.\n \"\"\"\n rr = [float(d[1]) for d in data]\n rr_ci = [float(d[8]) for d in data]\n\n measures = [(rr, 'Risk ratio'), (rr_ci, 'Risk ratio CI')]\n\n for measure in measures:\n upto_1 = len([d for d in measure[0] if d <= 1])\n below_2 = len([d for d in measure[0] if 1 < d < 2])\n above_2 = len([d for d in measure[0] if 2 <= d])\n\n print(measure[1])\n print(f'RR below 1: {upto_1} ({upto_1 / len(measure[0])}%)')\n print(f'RR below 2: {below_2} ({below_2 / len(measure[0])}%)')\n print(f'RR above 2: {above_2} ({above_2 / len(measure[0])}%)')\n\n\ndef register():\n \"\"\"\n Show ranges of risk ratio by register for Table 4.1-4.\n \"\"\"\n for reg in ['', '_written', '_spoken']:\n file_path = f'association_stats{reg}/000__association_stats{reg}.csv'\n\n with open(file_path, 'r') as f:\n data = [r for r in csv.reader(f)][1:]\n data = freq_filter(data)\n\n total = len(data)\n up_to_1 = len([r for r in data if float(r[1]) <= 1])\n more_than_1 = len([r for r in data if float(r[1]) > 1])\n\n print(f'Pair types in {reg} register')\n print(f'Up to 1: {up_to_1} ({round(100*up_to_1/total)}%)')\n print(f'More than 1: {more_than_1} ({round(100*more_than_1/total)}%)')\n print(total, '\\n')\n\n total = sum([int(r[-3]) for r in data])\n up_to_1 = sum([int(r[-3]) for r in data if float(r[1]) <= 1])\n more_than_1 = sum([int(r[-3]) for r in data if float(r[1]) > 1])\n\n print(f'Pair instances in {reg} register')\n print(f'Up to 1: {up_to_1} ({round(100*up_to_1/total)}%)')\n print(f'More than 1 {more_than_1} ({round(100*more_than_1/total)}%)')\n print(total, '\\n')\n\n\ndef adjacency():\n \"\"\"\n Display data related to risk ratio and suffix adjacency for Table 13.\n Run ANOVA on effect of adjacency for Section 4.1.\n \"\"\"\n # Get collocate pairs with risk ratio above 1\n data_ = [row for row in data if float(row[1]) > 1]\n\n # Get adjacency frequencies\n def f(x): return [float(r[1]) for r in data_ if x(int(r[-2])/int(r[-3]))]\n\n adjacent = f(lambda x: x == 1)\n subadjacent = f(lambda x: 0 < x < 1)\n nonadjacent = f(lambda x: x == 0)\n\n # Display adjacent frequencies and related data\n def g(x, y): print(x, len(y), sum(y)/len(y), numpy.var(y), numpy.median(y))\n\n print('Pair frequency, average RR,variance, median')\n g('Adjacent:', adjacent)\n g('Subadjacent:', subadjacent)\n g('Nonadjacent:', nonadjacent)\n\n # Conduct tests on adjacency\n print('\\n')\n print(stats.ttest_ind(adjacent, nonadjacent, equal_var=True))\n print(stats.ttest_ind(adjacent, nonadjacent, equal_var=False))\n 
print('Levene\\'s test:', stats.levene(adjacent, subadjacent, nonadjacent))\n print('1-way ANOVA:', (stats.f_oneway(adjacent, subadjacent, nonadjacent)))\n print('Pearson correlation:', stats.pearsonr([float(r[1]) for r in data],\n [int(r[-1]) for r in data]))\n\n\ndef asymmtery():\n \"\"\"\n Display ratios of risk ratio to risk ratio reverse for Table 14.\n \"\"\"\n # Get collocate pairs with risk ratio above 1\n data_ = [row for row in data if float(row[1]) > 1]\n\n # Get ratios\n def f(x): return max(float(x[1])/float(x[2]), float(x[2])/float(x[1]))\n\n atleast_2 = [f(d) for d in data_ if f(d) >= 2]\n atleast_2_types = [d[0] for d in data_ if f(d) >= 2]\n below_2 = [f(d) for d in data_ if f(d) < 2]\n\n # Display data\n print(len(atleast_2), min(atleast_2), max(atleast_2))\n print(len(below_2), min(below_2), max(below_2))\n\n for e in atleast_2_types:\n print(e)\n\n\ndef has_subordinate():\n \"\"\"\n Count trigrams containing one of the subordinate suffixes for Section 4.2,\n approximately on page 57.\n \"\"\"\n # Open data files\n with open('trigram/suffix_trigrams.txt', 'r') as f:\n trigram_lines = f.read().split('\\n')\n\n # Count trigrams with a subordinate suffix\n subordinates = ['Inf2→Noun', 'PastPart→Noun', 'FutPart→Noun']\n has_subordinate = 0\n\n for trigram_line in trigram_lines:\n # Get information from trigram file line\n trigram, trigram_freq = trigram_line.split(') ')\n\n if any([suffix in trigram for suffix in subordinates]):\n has_subordinate += 1\n\n print('Number of trigrams with a subordinate marker: ', has_subordinate)\n\n\ndef test_normality():\n \"\"\"\n Test the main risk ratio data for normality for Section 4.3, approximately\n on page 58.\n \"\"\"\n # Get risk ratios\n data_ = [d[1] for d in data]\n\n # Run normality test\n print('Shapiro-Wilk test for normality:')\n print(stats.shapiro([math.log(float(d)) for d in data_]))\n\n\ndef integrity():\n \"\"\"\n Display ranges of integrity ratios for Table 18.\n \"\"\"\n # Get collocate pairs of different formulaicity\n all_pairs = [row for row in data]\n formulaic_pairs = [row for row in data if float(row[1]) > 1]\n nonformulaic_pairs = [row for row in data if float(row[1]) <= 1]\n\n for subdataset in [all_pairs, formulaic_pairs, nonformulaic_pairs]:\n\n def f(x):\n return len([r for r in subdataset if x(float(r[-2])/float(r[-3]))])\n\n exactly_1 = f(lambda x: x == 1)\n half = f(lambda x: 0.5 <= x < 1)\n below_half = f(lambda x: 0 < x < 0.5)\n zero = f(lambda x: x == 0)\n\n print(exactly_1, half, below_half, zero)\n print(exactly_1/len(subdataset), half/len(subdataset),\n below_half/len(subdataset), zero/len(subdataset))\n\n\ndef trigram_link_ratios():\n \"\"\"\n Display ranges of trigram link ratios for Table 19.\n \"\"\"\n # Get trigrams\n with open('trigram/suffix_trigrams.txt', 'r') as f:\n trigram_lines = f.read().split('\\n')\n\n # Store risk ratio of suffix pairs\n data_ = dict(zip([r[0] for r in data if float(r[1]) > 1], [float(r[1]) for r in data if float(r[1]) > 1]))\n\n # Store risk ratios\n risk_ratios = []\n\n # Get risk ratios of stem-trigrams\n for trigram_line in trigram_lines:\n # Get information from trigram file line\n trigram = trigram_line.split(') ')[0]\n\n # Form tuples from trigram strings\n trigram = trigram[1:].split(', ')\n trigram = tuple([suffix[1:-1] for suffix in trigram])\n\n # Get constituent bigrams within the trigram\n bigrams = [(trigram[0], trigram[1]), (trigram[1], trigram[2])]\n\n # Get risk ratio of each bigram\n try:\n curr_rr = (data_[str(bigrams[0])], 
data_[str(bigrams[1])])\n\n if all([rr > 1 for rr in curr_rr]):\n risk_ratios.append(curr_rr)\n except KeyError:\n continue\n\n # Get ranges of risk ratio ratios\n def f(x): return len([r for r in risk_ratios\n if x(min(r[0]/r[1], r[1]/r[0]))])\n\n rr_ratio_1 = f(lambda x: 0.9 <= x <= 1)\n rr_ratio_2 = f(lambda x: 0.5 <= x < 0.9)\n rr_ratio_3 = f(lambda x: 0.1 <= x < 0.5)\n rr_ratio_4 = f(lambda x: x < 0.1)\n\n # Display data\n print('0.9 ≤ x ≤ 1:', rr_ratio_1, rr_ratio_1/len(risk_ratios))\n print('0.5 ≤ x < 0.9:', rr_ratio_2, rr_ratio_2/len(risk_ratios))\n print('0.1 ≤ x < 0.5:', rr_ratio_3, rr_ratio_3/len(risk_ratios))\n print('x < 0.1:', rr_ratio_4, rr_ratio_4/len(risk_ratios))\n\n\ndef stem_trigram_formulas():\n \"\"\"\n Tell how many stem-trigram pairs have a risk ratio above 1 for\n approximately page 62.\n \"\"\"\n with open('trigram/stem_trigram_rr.csv', 'r') as f:\n data = [row for row in csv.reader(f)][1:]\n data = [r for r in data if float(r[4]) >= 100 and float(r[5]) >= 100]\n\n print(len([r for r in data if float(r[2]) > 1]))\n print(len(data))\n\n\ndef stem_by_trigram():\n \"\"\"\n Tell how many verbs are associated with certain trigrams for Table 20.\n \"\"\"\n with open('trigram/stem_trigram_rr.csv', 'r') as f:\n data = [row for row in csv.reader(f)][1:]\n data = [r for r in data if float(r[4]) >= 100 and float(r[5]) >= 100]\n\n above_1 = defaultdict(int)\n up_to_1 = defaultdict(int)\n num_hosting_verbs = defaultdict(int)\n risk_ratios = defaultdict(list)\n\n for row in data:\n trigram = row[1]\n risk_ratio = float(row[2])\n\n num_hosting_verbs[trigram] += 1\n risk_ratios[trigram].append(risk_ratio)\n\n if risk_ratio > 1:\n above_1[trigram] += 1\n if risk_ratio <= 1:\n up_to_1[trigram] += 1\n\n for trigram in above_1:\n print(trigram)\n\n print('\\n')\n\n for trigram in above_1:\n print(up_to_1[trigram],\n '({0:.0%})'.format(up_to_1[trigram]/num_hosting_verbs[trigram]))\n\n print('\\n')\n\n for trigram in above_1:\n print(above_1[trigram],\n '({0:.0%})'.format(above_1[trigram] / num_hosting_verbs[trigram]))\n\n print('\\n')\n\n for trigram in above_1:\n print(f'{round(math.log(min(risk_ratios[trigram]), 2), 2)}'\n f' - {round(math.log(max(risk_ratios[trigram]), 2), 2)}')\n\n\n# Get the formula frequency and proportion associated with verb types\ndef rr_dist(fpaths=[], save_file_name='rr_dist_by_verbs.csv'):\n save_rows = []\n\n for fpath in fpaths:\n with open(fpath, 'r') as f:\n rows = [r for r in csv.reader(f)][1:]\n\n if not rows:\n continue\n\n f_freq = sum([int(r[-1]) for r in rows if float(r[1]) > 1])\n num_f_types = len([r for r in rows if float(r[1]) > 1])\n inst_sum = sum([int(r[-1]) for r in rows])\n type_sum = len([r for r in rows])\n save_rows.append([fpath.split('_')[2], f_freq, f_freq/inst_sum,\n num_f_types, num_f_types/type_sum])\n\n with open(save_file_name, 'w') as f:\n row_1 = ['verb_lemma', 'formula_freq', 'formula_freq_norm',\n 'num_formula', 'formula_prop']\n\n csv.writer(f).writerow(row_1)\n csv.writer(f).writerows(save_rows)\n\n\n# Show how many collocate pairs appear with how many verb types\ndef top_pairs(fpaths):\n pair_count_byverbs = defaultdict(int)\n overall_rr = dict()\n\n # Tally verb type occurrences\n for fpath in fpaths:\n with open(fpath, 'r') as f:\n rows = [r for r in csv.reader(f)][1:]\n\n for r in rows:\n pair_count_byverbs[r[0]] += 1\n\n if '000' in fpath:\n overall_rr[r[0]] = r[1]\n\n # Display number of collocate pairs in different ranges of verb type freq.\n for i in range(8):\n pairs = [p for p in pair_count_byverbs\n if i*100 
<= pair_count_byverbs[p] < (i+1)*100]\n\n        print(i*100, (i+1)*100)\n        print(len(pairs), '\\n')\n\n    # Get the most verb-frequent collocate pairs\n    keys = [k for k in pair_count_byverbs]\n    keys.sort(reverse=True, key=lambda x: pair_count_byverbs[x])\n\n    for p in keys[:81]:\n        print(f'{p}\\t\\t\\t\\t{pair_count_byverbs[p]}\\t\\t\\t\\t{overall_rr[p]}')\n\n    return keys[:20]\n\n\n# Find the trend of the RR of a pair across verbs\ndef cross_verb_trend(fpaths):\n    # Get all pairs in whole dataset\n    with open(fpaths[-1], 'r') as f:\n        target_pairs = [r[0] for r in csv.reader(f)][1:]\n\n    target_rrs = dict(zip(target_pairs,\n                          [defaultdict(lambda:'') for t in target_pairs]))\n\n    # Get RR of collocate pairs across verb files\n    for fpath in fpaths:\n        curr_stem = fpath.split('_')[2]\n\n        with open(fpath, 'r') as f:\n            for r in [r_ for r_ in csv.reader(f)][1:]:\n                target_rrs[r[0]][curr_stem] = r[1]\n\n    # Save data\n    with open('cross_verb_trends.csv', 'w') as f:\n        stems = [fpath.split('_')[2] for fpath in fpaths]\n        row_1 = ['Pair'] + [s for s in stems] + ['Verb_type_frequency']\n        rows = [[k] + [target_rrs[k][s] for s in stems] +\n                [len([s for s in target_rrs[k] if target_rrs[k][s]]) - 1]\n                for k in target_rrs]\n\n        csv.writer(f).writerow(row_1)\n        csv.writer(f).writerows(rows)\n\n\ndef formulas():\n    for r in data:\n        if int(r[-3]) == int(r[-2]) or int(r[-4]) == int(r[-2]):\n            print(r[0])\n\n\nif __name__ == '__main__':\n    data_dir = os.listdir('association_stats/')\n    data_dir.sort()\n    data_files = [os.path.join('association_stats/', fp) for fp in data_dir]\n\n    with open('association_stats/000__association_stats.csv', 'r') as f:\n        data = [row for row in csv.reader(f)][1:]\n        data = freq_filter(data)\n\n    # rr_ranges()\n    # register()\n    # adjacency()\n    # asymmtery()\n    # has_subordinate()\n    # test_normality()\n    # integrity()\n    trigram_link_ratios()\n    # stem_trigram_formulas()\n    # stem_by_trigram()\n    # rr_dist(data_files)\n    # tops = top_pairs(data_files)\n    # cross_verb_trend(data_files)\n    # test_normality('association_stats/000__association_stats.csv')\n    # formulas()\n\n    exit(0)\n","repo_name":"heikalb/thesis-scripts","sub_path":"d5_statistics/archive/risk_ratio_analysis.py","file_name":"risk_ratio_analysis.py","file_ext":"py","file_size_in_byte":13185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"23591439460","text":"# 25Chatting.py\n\nfrom socket import *\n\nPORT = 10000\n\ndef udpClient():\n    client = socket(AF_INET, SOCK_DGRAM) # UDP : User Datagram Protocol\n    client.sendto('Hello I am Python'.encode(), (\"127.0.0.1\", PORT))\n    s, addr = client.recvfrom(1024)\n    print(s.decode())\n\nif __name__ == \"__main__\":\n    udpClient()\n\n\n","repo_name":"JoobeeJung/python","sub_path":"25Chatting.py","file_name":"25Chatting.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
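# --- Editor's note (added example) ---
# The 25Chatting.py record above only shows the UDP client side. For context,
# a minimal echo server it could talk to might look like this sketch. The port
# (10000) and buffer size (1024) mirror the client; binding to loopback is an
# assumption.
from socket import socket, AF_INET, SOCK_DGRAM

def udp_server(port=10000):
    server = socket(AF_INET, SOCK_DGRAM)
    server.bind(("127.0.0.1", port))        # listen on loopback only
    while True:
        data, addr = server.recvfrom(1024)  # blocks until a datagram arrives
        server.sendto(data, addr)           # echo the payload back to the sender

if __name__ == "__main__":
    udp_server()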
+{"seq_id":"40491264751","text":"\"\"\"\nFILE I/O\nAuthor: Murphy Studebaker\nWeek of November 4\n\nSometimes you want to write a program that reads from or\nwrites to external files. Common use cases for this are reading\nin data as an input for some sort of processing (scraping data off websites,\ndoing some sort of analysis) or persisting data after your program has ended \n(saving user contacts or information that can be loaded back in)\n\"\"\"\n\n# FILE OBJECTS\n# To access a file, you must first \"open\" it\ndocument = open('text.txt','r')\n# now the variable 'document' is a file object\n# for the file 'text.txt' in read mode\n\n# ACCESS MODES\n# determine the program's ability to manipulate the file\n# r : read only\n# w : write, replaces what is in the file with new content\n# a : append, writes new content to the end and keeps existing content\n# r+ : read and write mode\n\n# must close the document when you are done with it\ndocument.close() \n\n# READING FROM A FILE\n# Files are read through a buffer, so text is consumed\n# and the buffer moves forward. To go back to the beginning\n# of a file, call document.seek(0) (or close and reopen it).\ndocument = open('text.txt','r')\nwhole_document = document.read()\ndocument.close()\n\ndocument = open('text.txt','r')\ndocument_as_list = document.readlines()\ndocument.close()\n\nprint(whole_document)\nprint(document_as_list)\n\n# Loop through each sentence\ndocument = open('text.txt','r')\nfor line in document:\n    # the variable 'line' is automatically assigned each line in the file\n    sentence = line.replace('\\n','')\n    print(sentence + \"!\")\ndocument.close()\n\n# WRITING TO A FILE\ndocument = open('text.txt','w')\ndocument.close()\n# notice that the tabs and newlines are kept in the file\n# also, that 'w' mode replaces everything in the original file\n\ndocument = open('text.txt','a')\ndocument.write('ART FROM THE INTERNET')\ndocument.close()\n\n# MAKING THINGS EASIER WITH 'WITH' STATEMENTS\n# using with automatically closes the file once it's done\nwith open('text.txt','r+') as f:\n    n = 0\n    for line in f:\n        line = line.replace('\\n', '')\n        print(str(n) + line)\n        n += 1\n    f.write(str(n))\n\n\"\"\"\n# PRACTICE: ADAPTING PIGLATIN PROGRAM\n# Modify your piglatin program to read and write from a file\n# Your program should read in a file, \n# pass each word in to your word_to_pig function,\n# then replace the contents of the file with the version in piglatin.\n\"\"\"\n\ndef word_to_pig(word):\n    pig_word = word.lower()\n    if word[0] in \"aeiou\":\n        # starts with a vowel, add yay\n        pig_word += 'yay'\n    else:\n        pig_word = pig_word[1:] + pig_word[0] + \"ay\"\n    return pig_word[0].upper() + pig_word[1:]\n\npig_version = \"\"\nsentences = []\nwith open('piggy.txt','r') as f:\n    for line in f:\n        sentence = line.split()\n        pig_sentence = []\n        for word in sentence:\n            pig_sentence.append(word_to_pig(word))\n        sentences.append(' '.join(pig_sentence))\n\nwith open('piggy.txt','w') as f:\n    for sentence in sentences:\n        f.write(sentence + '\\n')\n    #f.writelines(sentences) #can write a list to the file with writelines() function\n\n\"\"\"\nERRORS\n\nAs I'm sure you're familiar with, sometimes operations in your program can produce errors.\nThese are useful when debugging to eliminate incorrect coding practices in your program,\nbut sometimes an unexpected user input can cause an otherwise functioning program to crash.\nTo account for this, Python lets you implement try/except statements.\n\"\"\"\ntry:\n    #the block of code to attempt to run\n    doesnt_exist = open('does_not_exist.txt','r')\nexcept FileNotFoundError:\n    # if a FileNotFoundError occurs in the try block, this runs\n    print(\"That file does not exist.\")\nexcept:\n    # if any other type of error 
occurs in the try block, this runs\n print(\"Something happened. Sorry :(\")\n# make sure to order your except statements in order of specificity,\n# leaving the general 'except' for last\n# so you can give specific debugging info to the user\n","repo_name":"MurphyStudebaker/intro-to-python","sub_path":"8-Files.py","file_name":"8-Files.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12426475922","text":"import argparse\nimport json\nimport os\nimport subprocess\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple\n\nimport openai\nimport requests\nfrom rich.console import Console\nfrom rich.markdown import Markdown\nfrom rich.table import Table\n\nOPENAI_GPT_TYPES = {\n \"gpt-3.5-turbo\", # ChatGPT (this is always updated if they have a new version)\n \"gpt-3.5-turbo-16k\", # ChatGPT with 16k token context --> apply this if the diff is too big (more expensive, ~2X the price of the normal one)\n \"gpt-3.5-turbo-0301\", # Older ChatGPT released in March 2022\n \"gpt-4\", # GPT-4 (this is more than ~10X the price compared to ChatGPT)\n}\n\n\ndef get_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(description=\"Prometh Review - AI Pull Request Reviewer Companion\")\n parser.add_argument(\"--base-url\", type=str)\n parser.add_argument(\"-p\", \"--project-key\", type=str, help=\"Project key\")\n # TODO: include the choices for the GPT types\n parser.add_argument(\"--llm-type\", default=\"gpt-3.5-turbo\", type=str, help=\"LLM type. Default: gpt-3.5-turbo\")\n parser.add_argument(\n \"-c\",\n \"--nb-context-lines\",\n default=10,\n type=int,\n help=\n \"how much lines to include before and after a changed line? Reduce it if you have problems with the script. 
Default: 10\"\n )\n # TODO: repo slug could be extracted with a git command\n parser.add_argument(\"-r\", \"--repo\", required=True, type=str, help=\"Slug of the repo\")\n parser.add_argument(\"-id\", \"--pull-request-id\", required=True, type=int, help=\"ID for the pull request\")\n parser.add_argument(\"-d\",\n \"--show-diff-only\",\n action=\"store_true\",\n help=\"Show diff only, no AI checks, used mainly for debugging\")\n parser.add_argument(\"-i\",\n \"--interactive\",\n action=\"store_true\",\n help=\"You can continue to chat about the PR with the AI\")\n parser.add_argument(\n \"-e\",\n \"--exhaustive-analysis\",\n action=\"store_true\",\n help=\n \"Perform an exhaustive analysis of the PR which means that the newly added (whole) files will be included in the analysis\"\n )\n parser.add_argument(\"--show-prompt\",\n action=\"store_true\",\n help=\"Shows the prompt which is the input for the AI (mainly used for debugging)\")\n parser.add_argument(\"-x\",\n \"--exclude-file-types\",\n nargs=\"+\",\n default=[],\n help=\"Excluded file types from the analysis\")\n parser.add_argument(\"-s\",\n \"--service\",\n default=\"stash\",\n choices=[\"stash\", \"bitbucket\", \"github\", \"manual\"],\n help=\"Where do you have your PR?\")\n\n # If there is a ~/.promethrc.json file, update the defualts so the user does not have to define them\n prometh_rc_filepath = Path(\"~/.promethrc.json\").expanduser()\n if prometh_rc_filepath.exists():\n prometh_rc = json.load(prometh_rc_filepath.open())\n parser.set_defaults(**prometh_rc)\n\n args = parser.parse_args()\n\n if args.base_url is None:\n raise ValueError(\"Base URL is not set\")\n if args.project_key is None:\n raise ValueError(\"Project key is not set\")\n\n return args\n\n\nclass StashAPI:\n\n def __init__(self, base_url: str, project_key: str, repo_slug: str, pr_id: int) -> None:\n self.stash_token = os.environ[\"STASH_HTTP_ACCESS_TOKEN\"]\n if self.stash_token is None:\n raise ValueError(\"STASH_HTTP_ACCESS_TOKEN is not set\")\n self.base_url = base_url\n self.project_key = project_key\n self.repo_slug = repo_slug\n self.pr_id = pr_id\n\n self.headers = {\"Authorization\": f\"Bearer {self.stash_token}\", \"Accept\": \"application/json\"}\n # TODO: check if we can access the base URL (are we connected to the VPN?)\n\n def _get_repo_info_from_stash(self) -> dict:\n url_info = f\"http://{self.base_url}/rest/api/latest/projects/{self.project_key}/repos/{self.repo_slug}/pull-requests/{self.pr_id}\"\n response = requests.get(url_info, headers=self.headers)\n pr_info_data = response.json()\n return pr_info_data\n\n def _get_repo_diff_data_from_stash(self) -> dict:\n url_diff = f\"http://{self.base_url}/rest/api/latest/projects/{self.project_key}/repos/{self.repo_slug}/pull-requests/{self.pr_id}/diff\"\n response = requests.get(url_diff, headers=self.headers)\n pr_diff_data = response.json()\n return pr_diff_data\n\n def get_data(self) -> dict:\n pr_info_data = self._get_repo_info_from_stash()\n pr_diff_data = self._get_repo_diff_data_from_stash()\n\n # Last commit of the PR\n to_commit = pr_diff_data[\"toHash\"]\n # Commit before the first commit of the PR\n from_commit = pr_diff_data[\"fromHash\"]\n # Title of the PR\n pr_title = pr_info_data[\"title\"]\n # Description of the PR (this is optional)\n try:\n pr_description = pr_info_data[\"description\"]\n except KeyError:\n pr_description = \"\"\n # Link to the PR\n pr_link = pr_info_data[\"links\"][\"self\"][0][\"href\"]\n\n return {\n \"to_commit\": to_commit,\n \"from_commit\": from_commit,\n 
\"pr_title\": pr_title,\n \"pr_description\": pr_description,\n \"pr_link\": pr_link,\n }\n\n\ndef generate_llm_response(gpt_type: str, messages: List[dict]) -> Tuple[str, Optional[dict]]:\n response: str = \"No Response\"\n usage_metrics: Optional[dict] = None\n\n if gpt_type in OPENAI_GPT_TYPES:\n if \"OPENAI_API_KEY\" not in os.environ:\n raise ValueError(\"OPENAI_API_KEY is not set\")\n\n chat_completion: dict = openai.ChatCompletion.create(model=gpt_type, messages=messages)\n response = chat_completion[\"choices\"][0][\"message\"][\"content\"]\n usage_metrics = chat_completion[\"usage\"]\n else:\n # If it's not an OpenAI GPT type, it's a custom locally deployed GPT type\n # TODO: make this endpoint configurable & the parameters\n url = \"http://localhost:8080/v1/chat/completions\"\n headers = {\"Content-Type\": \"application/json\"}\n data = {\"model\": gpt_type, \"messages\": messages, \"temperature\": 0.7}\n resp = requests.post(url, headers=headers, data=json.dumps(data))\n response = resp.json()[\"choices\"][0][\"message\"][\"content\"]\n usage_metrics = None\n\n return response, usage_metrics\n\n\ndef analyze_pr_with_GPT(pr_title, pr_description, diff_output, gpt_type, print_prompt: bool = False):\n prompt = f\"\"\" You are a senior engineer and your task is the following:\n - Review the code changes provided in a diff and provide feedback\n - Separately point out the bugs, security issues, missed best-practices\n - If there are any bugs, highlight them (and use 'BUG' tag at the start of the line).\n - Does the code do what it says in the pull request title, description?\n - Do not do nitpicking, discard minor issues.\n - Use markdown headers and bullet points to organize your feedback\n - In your output use the following headers: '**Summary**', '**Bugs**', '**Security**', '**Best-practices**', '**Other**'\n - Provide security recommendations if there are any.\n - Provide details on missed use of best-practices.\n - Be concise and to the point.\n\n You are provided with the code changes (diffs) in a unidiff format.\n \"\"\"\n\n pr_description_message = f\"\"\"A description was given to help you assist in understand why these changes were made.\n The description was provided in a markdown format.\n\n Title: {pr_title}\n Description: {pr_description}\n \"\"\"\n\n # TODO: Truncate the diff if it is possible and if the diff is too big\n # (the change does not need to be here, this is just a reminder)\n diff_message = f\"\"\"Diff in unidiff format:\n\n {diff_output}\n \"\"\"\n\n final_message = \"\"\"All code changes have been provided.\n Please provide me with your concise code review based on all the changes, context & title provided\n \"\"\"\n\n messages = [{\n \"role\": \"system\",\n \"content\": prompt\n }, {\n \"role\": \"user\",\n \"content\": pr_description_message\n }, {\n \"role\": \"user\",\n \"content\": diff_message\n }, {\n \"role\": \"user\",\n \"content\": final_message\n }]\n\n if print_prompt:\n print(\"\\n\".join([message[\"content\"] for message in messages]))\n\n response, usage_metrics = generate_llm_response(gpt_type, messages)\n\n # Add the analyzis message to the message for the interactive chatting\n messages.append({\"role\": \"assistant\", \"content\": response})\n\n return response, usage_metrics, messages\n\n\ndef main():\n console = Console()\n args = get_args()\n\n table = Table(title=\"Arguments\")\n table.add_column(\"Argument\", justify=\"right\", style=\"bold\", no_wrap=True)\n table.add_column(\"Value\")\n for arg in vars(args):\n 
table.add_row(arg, str(getattr(args, arg)))\n console.print(table)\n\n # TODO: make it work with Bicbucket/Stash, GitHub, GitLab, w/out any of them (manual mode)\n api = None\n if args.service in {\"stash\", \"bitbucket\"}:\n api = StashAPI(args.base_url, args.project_key, args.repo, args.pull_request_id)\n elif args.service == \"github\":\n raise NotImplementedError(\"GitHub is not implemented yet\")\n else:\n print(\"Manual mode...\")\n raise NotImplementedError(\"Manual mode is not implemented yet\")\n try:\n data: dict = api.get_data()\n to_commit = data[\"to_commit\"]\n from_commit = data[\"from_commit\"]\n pr_title = data[\"pr_title\"]\n pr_description = data[\"pr_description\"]\n pr_link = data[\"pr_link\"]\n except Exception as e:\n print(f\"Error: {e}\")\n print(\"Problems... but I am not going to handle it. Happy debugging sucker\")\n exit(1)\n\n table = Table(title=\"Info\")\n table.add_column(\"Info\", justify=\"right\", style=\"bold\", no_wrap=True)\n table.add_column(\"Value\")\n for key, value in data.items():\n table.add_row(key, str(value))\n console.print(table)\n\n _cmd = [\"git\", \"diff\", \"--name-status\", from_commit, to_commit]\n diff_name_status_output = subprocess.check_output(_cmd, encoding=\"utf-8\")\n\n _diff_filter = \"M\"\n if args.exhaustive_analysis:\n # Include all the lines from the added files as well\n _diff_filter += \"A\"\n\n _cmd = [\"git\", \"diff\", f\"--diff-filter={_diff_filter}\", f\"-U{args.nb_context_lines}\", from_commit, to_commit]\n if len(args.exclude_file_types) > 0:\n _cmd.append(\"--\")\n _cmd.append(\".\")\n for t in args.exclude_file_types:\n _cmd.append(f\":(exclude)*{t}\")\n\n # print(f\"git diff command:\\n{' '.join(_cmd)}\\n\")\n\n diff_modified_output = subprocess.check_output(_cmd, encoding=\"utf-8\")\n if len(diff_modified_output) == 0:\n print(\"No file modifications in the PR. But new and deleted files might be present.\")\n\n diff_output = diff_name_status_output + \"\\n\\n\" + diff_modified_output\n\n if args.show_diff_only:\n console.print(\"[bold]Showing only the git diff output:[/bold]\\n\")\n console.print(diff_output)\n exit(0)\n\n if len(diff_output) == 0:\n raise Exception(\"No diff output. 
Something is wrong, but I don't care.\")\n\n    analysis_message, usage_metrics, messages = analyze_pr_with_GPT(pr_title, pr_description, diff_output,\n                                                                    args.llm_type, args.show_prompt)\n\n    if usage_metrics:\n        table = Table(title=\"OpenAI Usage Metrics\")\n        table.add_column(\"Metric\", justify=\"right\", style=\"bold\", no_wrap=True)\n        table.add_column(\"Value\")\n        for key, value in usage_metrics.items():\n            table.add_row(key, str(value))\n        console.print(table)\n\n    console.print(\"[magenta][bold]------ Analysis: ------[/bold][/magenta]\\n\")\n    analysis_message_md = Markdown(analysis_message)\n    console.print(analysis_message_md)\n    print(\"\\n\")\n\n    if args.interactive:\n        console.print(\"[cyan][bold]------ Interactive chatting: ------[/bold][/cyan]\\n\")\n        console.print(\"You can chat with the AI about the PR\")\n        console.print(\"Press `CTRL+C` or type 'exit' [red]to exit[/red] the interactive chatting\\n\")\n\n        while True:\n            user_input = console.input(\"[magenta][bold]You:[/bold][/magenta] \")\n            if user_input == \"exit\":\n                exit(0)\n            print(\"\\n\")\n            messages.append({\"role\": \"user\", \"content\": user_input})\n            response, _ = generate_llm_response(args.llm_type, messages)\n            messages.append({\"role\": \"assistant\", \"content\": response})\n            console.print(f\"[cyan][bold]AI:[/bold] {response}\\n[/cyan]\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"gaborvecsei/Prometh-Review","sub_path":"prometh/prometh_cli.py","file_name":"prometh_cli.py","file_ext":"py","file_size_in_byte":13016,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"}
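# --- Editor's note (added example) ---
# The ~/.promethrc.json handling in the record above relies on
# argparse.set_defaults(): values loaded from the file replace the parser's
# built-in defaults, while explicitly passed flags still win. A minimal,
# self-contained sketch of that precedence (option names reuse the record's;
# the config dict below is hypothetical):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--llm-type", default="gpt-3.5-turbo")
parser.add_argument("-c", "--nb-context-lines", type=int, default=10)

rc = {"llm_type": "gpt-4", "nb_context_lines": 20}  # as if read from ~/.promethrc.json
parser.set_defaults(**rc)                           # keys use argparse dest names

print(parser.parse_args([]))                                # file values act as defaults
print(parser.parse_args(["--llm-type", "gpt-3.5-turbo"]))   # an explicit flag overrides the file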
+{"seq_id":"11868443583","text":"from bs4 import BeautifulSoup\nimport geojson\n\n\nmaxPages = 6145\nfeatures = []\n\n\nclass NoOutletsFoundException(Exception):\n    pass\n\n\ndef parsePage(pageContent):\n    soup = BeautifulSoup(pageContent, \"html5lib\")\n\n    if soup.find(\"div\", attrs={\"class\": \"no-outlets\"}):\n        raise NoOutletsFoundException(\"No outlets found\")\n\n    stores = soup.find(\"div\", attrs={\"class\": \"outlet-list\"})\n\n    for store in stores.find_all(\"div\", attrs={\"class\": \"store-info-box\"}):\n        lat = (\n            store.find(\"input\", attrs={\"class\": \"outlet-latitude\"}).attrs[\"value\"] or 0\n        )\n        lng = (\n            store.find(\"input\", attrs={\"class\": \"outlet-longitude\"}).attrs[\"value\"] or 0\n        )\n        name = (\n            store.find(\"li\", attrs={\"class\": \"\"})\n            .find(\"div\", attrs={\"class\": \"info-text\"})\n            .text\n        ).strip()\n        address = (\n            store.find(\"li\", attrs={\"class\": \"outlet-address\"})\n            .find(\"div\", attrs={\"class\": \"info-text\"})\n            .get_text(separator=\" \")\n        ).strip()\n        phone = (\n            store.find(\"li\", attrs={\"class\": \"outlet-phone\"})\n            .find(\"div\", attrs={\"class\": \"info-text\"})\n            .text\n        ).strip()\n        timings = (\n            store.find(\"li\", attrs={\"class\": \"outlet-timings\"})\n            .find(\"div\", attrs={\"class\": \"info-text\"})\n            .text\n        ).strip()\n        address, areacode = [x.strip() for x in address.split(\" - \")]\n        url = (\n            store.find(\"li\", attrs={\"class\": \"outlet-name\"})\n            .find(\"div\", attrs={\"class\": \"info-text\"})\n            .find(\"a\")\n            .attrs[\"href\"]\n        ).strip()\n\n        features.append(\n            geojson.Feature(\n                geometry=geojson.Point((float(lng), float(lat))),\n                properties={\n                    \"name\": name,\n                    \"address\": address,\n                    \"areacode\": areacode,\n                    \"phone\": phone,\n                    \"timings\": timings,\n                    \"url\": url,\n                },\n            )\n        )\n\n\nfor i in range(1, maxPages + 1):\n    try:\n        with open(f\"responses/{i}.html\", \"r\") as f:\n            parsePage(f.read())\n    except NoOutletsFoundException:\n        print(f\"No outlets found on page {i}\")\n    except:\n        print(f\"Error parsing page {i}\")\n\n\nfeatureCollection = geojson.FeatureCollection(features)\nwith open(\"indian_oil_fuel_stations.geojson\", \"w\") as f:\n    geojson.dump(featureCollection, f)\n","repo_name":"MG-LSJ/indian_oil_fuel_stations","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"71374399305","text":"import logging\nfrom datetime import date\nimport discord\nfrom discord.ext import commands\nimport json\n\n# Initiate json\nfile = open(\"config.json\")\ndata = json.load(file)\n\n# Logging\nlogging.basicConfig(\n    filename=\"./logs/discordlogs.log\",\n    filemode=\"w\",\n    format=\"%(name)s - %(levelname)s - %(message)s\",\n)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# Public Vars\ntoday = date.today()\ndateToday = today.strftime(\"%m-%d-%Y\")\n\n# Public vars\nleaveChat = data[\"channelIDs\"][\"leaveChat\"]\n\n\nclass LeaveListener(commands.Cog, name=\"Leave Listener\"):\n    def __init__(self, bot):\n        self.bot = bot\n        self.leaveChatChannel = bot.get_channel(leaveChat)\n\n    # When someone leaves, log it\n    # (discord.py fires on_member_remove for this; the bot also needs the\n    # privileged \"members\" intent enabled for the event to be delivered)\n    @commands.Cog.listener()\n    async def on_member_remove(self, member):\n        embed = discord.Embed(\n            title=\"Member left\",\n            description=f\"{member.mention} has left the server. RIP.\",\n            color=discord.Color.red(),\n        )\n        embed.set_footer(text=f\"Member ID: {member.id} • Date: {dateToday}\")\n        await self.leaveChatChannel.send(embed=embed)\n        logger.info(f\"{member.name} with ID: {member.id} has left the server!\")\n\n\ndef setup(bot):\n    bot.add_cog(LeaveListener(bot))\n","repo_name":"Aggis15/T4NK0R","sub_path":"Listeners/leaveListener.py","file_name":"leaveListener.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"70519142665","text":"import time\r\nimport datetime\r\n\r\n\r\ndef log(log_file):\r\n\r\n    def write_log(func):\r\n\r\n        def wrapper(*args, **kwargs):\r\n            start_time = time.time()\r\n            res = func(*args, **kwargs)\r\n            end_time = time.time()\r\n            spend_time = end_time - start_time\r\n            with open(log_file, 'a', encoding='utf-8') as file:\r\n                file.write('%s function %s executed, took %ss\\n' % (datetime.datetime.today(), func.__name__, spend_time))\r\n            return res\r\n\r\n        return wrapper\r\n\r\n    return write_log\r\n\r\n\r\n@log('log_file.txt')\r\ndef count(n):\r\n    for i in range(n):\r\n        '哈哈哈'\r\n    return 1\r\n\r\n\r\nif __name__ == '__main__':\r\n    count(100000000)\r\n","repo_name":"aliluys/PythonAssignment","sub_path":"PythonStage02/ClosureFuc/闭包函数3.py","file_name":"闭包函数3.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"40209124106","text":"mydict={\r\n    \"book\":\"Dynamics\",\r\n    \"Publisher\":\"longhorn\",\r\n    \"year\":2001,\r\n    \"Authors\":['Elon musk','Paul levesque'],\r\n    \"Good\": True\r\n\r\n}\r\n# print(mydict.keys())\r\n# print(mydict.values())\r\n# print(mydict.get('book'))\r\n# print(mydict.items())\r\n\r\ndef outputName(a):\r\n    print(\"hi\",a)\r\ndef replacein(phrase):\r\n    word =\"\"\r\n    for letter in phrase:\r\n        if letter.lower() in \"aeiou\":\r\n            word = word+\"g\"\r\n        else:\r\n            word=word+letter\r\n    return word\r\n    \r\nprint(replacein(input(\"Enter phrase: 
\")))\r\n","repo_name":"janicecodes/stem-training","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"22716202224","text":"import urllib.request,ast,requests,re\nfrom bs4 import BeautifulSoup\nfrom lxml import etree\n\n\n\ndef return_html(url):\n fp = urllib.request.urlopen(url)\n mybytes = fp.read()\n source = mybytes.decode(\"utf8\")\n return source\n\ndef html_from_query(query):\n base_url = \"https://web.stagram.com/search?query=\"\n return return_html(base_url+query)\n\ndef get_usernames(query):\n html = html_from_query(query)\n soup = BeautifulSoup(html, \"html.parser\" )\n frame_div = soup.findAll('div', attrs={'class':'row photolist'})\n newsoup = BeautifulSoup(str(frame_div), \"html.parser\" )\n unames = []\n for a in newsoup.find_all('a', href=True):\n url = a['href']\n length = len(url.split(\"/\")) - 1\n if not length > 1:\n uname = url[1:]\n unames.append(uname)\n return unames\n\ndef get_bio(handle, proxies=None):\n url = 'https://www.instagram.com/{}/'.format(handle)\n attributes = {}\n response = requests.get(url, proxies=proxies)\n response.raise_for_status()\n if response.ok:\n root = etree.HTML(response.content)\n data_raw = root.xpath(\"//script[contains(text(), 'entry_data')]\")[0].text\n data_raw = data_raw[data_raw.find('{'): data_raw.rfind('}') + 1]\n data_raw = data_raw.replace('false', 'False')\n data_raw = data_raw.replace('true', 'True')\n data_raw = data_raw.replace('null', 'None')\n data_dict = ast.literal_eval(data_raw)\n d=data_dict['entry_data']['ProfilePage'][0]['graphql']['user']\n bio = d['biography']\n return repr(bio)\n\ndef get_emails(uname):\n bio = get_bio(uname)\n r = re.compile(r\"(\\w(?:[-.+]?\\w+)+\\@(?:[a-z0-9](?:[-+]?\\w+)*\\.)+[a-z]{2,})\", re.I)\n regex = r.findall(bio)\n if bool(regex):\n #print(regex)\n print(\"[+] \"+uname)\n print(\" | \"+\"\\n[+] \".join(regex))\n\ndef return_list(unames_string):\n all_unames = []\n unames_string = unames_string.split(\",\")\n for uname in unames_string:\n all_unames += get_usernames(uname.strip())\n return all_unames\n\nif __name__ == \"__main__\":\n hashtags = \"hashtag1,hashtag2,hashtag3\"\n all_unames = return_list(\n \n )\n for uname in all_unames:\n get_emails(uname)\n print(\"[e] End\")\n","repo_name":"bufferbandit/instagram_bio_scraper","sub_path":"insta_bio_email_scraper.py","file_name":"insta_bio_email_scraper.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"8180082274","text":"import datetime\nfrom modelNotes import ModelNotes\nfrom presenter import Presenter\nfrom note import Note\nfrom view import View\n\n\ndef main():\n p = Presenter(ModelNotes(\"notes.json\"), View())\n\n print(\"Введите команду:\")\n print(\"1 - добавить заметку\")\n print(\"2 - прочитать список заголовков заметок c номером id\")\n print(\"3 - найти заметку по номеру id\")\n print(\"4 - редактировать заметку по номеру id\")\n print(\"5 - удалить заметку\")\n print(\"Выйти из меню - нажмите \")\n\n try:\n while True:\n user_choise = int(input())\n if user_choise == 1:\n p.create_note(note_input())\n elif user_choise == 2:\n p.show_title_notes()\n elif user_choise == 3:\n note_id = int(\n input(\"Введите номер id заметки, которую хотите найти:\"))\n if p.note_id_exist(note_id) == True:\n p.show_note(note_id)\n elif user_choise == 4:\n note_id = int(\n 
input(\"Введите номер id заметки, которую хотите редактировать:\"))\n if p.note_id_exist(note_id) == True:\n date = datetime.datetime.now().strftime(\"%Y-%b-%d %H:%M\")\n title = input('Введите заголовок заметки: ')\n text = input('Введите текст заметки: ')\n p.update_note(note_id, date, title, text)\n elif user_choise == 5:\n note_id = int(\n input(\"Введите номер id заметки, которую хотите удалить:\"))\n if p.note_id_exist(note_id) == True:\n p.delete_note(note_id)\n else:\n print(\"Число должно быть от одного до пяти\")\n except ValueError:\n print(\"Выход из программы\")\n\n\ndef note_input():\n id = 0\n date = datetime.datetime.now().strftime(\"%Y-%b-%d %H:%M\")\n title = input('Введите заголовок заметки: ')\n text = input('Введите текст заметки: ')\n return Note(id, date, title, text)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alex188w/Interim_Work_1_Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71635023304","text":"# -*- coding:utf-8 -*-\r\n# ! usr/bin/env python3\r\n\"\"\"\r\nCreated on 13/04/2021 9:57\r\n@Author: XINZHI YAO\r\n\"\"\"\r\n\r\nimport logging\r\n\r\nfrom collections import defaultdict\r\n\r\nimport torch\r\nfrom torch.utils.data import DataLoader, Dataset\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass RE_Dataset(Dataset):\r\n def __init__(self, paras, mode: str):\r\n if mode == 'train':\r\n logger.info('loading train dataset.')\r\n self.data_path = paras.train_file\r\n elif mode == 'test':\r\n logger.info('loading test dataset.')\r\n self.data_path = paras.test_file\r\n else:\r\n raise ValueError(f'mode must be \"train\" or \"test\",'\r\n f' but got \"{mode}\".')\r\n\r\n self.ner_label_file = paras.ner_label_file\r\n self.special_token_set = set()\r\n\r\n self.load_denotation = paras.load_denotation\r\n self.add_denotation_span = paras.add_denotation_span\r\n\r\n self.label_file = paras.label_file\r\n\r\n self.data_statistics = defaultdict(int)\r\n self.data = []\r\n self.label = []\r\n\r\n self.label_set = set()\r\n self.label_to_index = {}\r\n self.index_to_label = {}\r\n\r\n self.load_special_token()\r\n self.read_label()\r\n self.read_data()\r\n\r\n if len(self.data_statistics.keys()) > 3:\r\n logger.warning('Too many relations in data set.')\r\n logger.warning(self.data_statistics.keys())\r\n\r\n themeof_count = self.data_statistics['ThemeOf']\r\n causeof_count = self.data_statistics['CauseOf']\r\n norelation_count = self.data_statistics['NoRelation']\r\n\r\n logger.info(f'Positive: {themeof_count+causeof_count} (ThemeOf: {themeof_count}, '\r\n f'CauseOf: {causeof_count}), '\r\n f'Negative: {norelation_count}.')\r\n\r\n\r\n def __len__(self):\r\n return len(self.data)\r\n\r\n def __getitem__(self, item):\r\n return self.data[item], self.label[item]\r\n\r\n def load_special_token(self):\r\n with open(self.ner_label_file) as f:\r\n for line in f:\r\n ner_label = line.strip()\r\n self.special_token_set.add(f'')\r\n self.special_token_set.add(f'')\r\n\r\n @staticmethod\r\n def add_span_token(sentence, label1, offset1, label2, offset2):\r\n\r\n offset_to_label = {offset1: label1, offset2: label2}\r\n\r\n first_offset, second_offset = sorted([offset1, offset2])\r\n\r\n first_label = offset_to_label[first_offset]\r\n second_label = offset_to_label[second_offset]\r\n\r\n first_head, first_tail = f'', f''\r\n second_head, second_tail = f'', f''\r\n\r\n sent_list = [s for s in 
sentence]\r\n sent_list.insert(second_offset[1], second_tail)\r\n sent_list.insert(second_offset[0], second_head)\r\n\r\n sent_list.insert(first_offset[1], first_tail)\r\n sent_list.insert(first_offset[0], first_head)\r\n\r\n sent = ''.join(sent_list)\r\n return sent\r\n\r\n\r\n def read_label(self):\r\n\r\n with open(self.label_file) as f:\r\n for line in f:\r\n label = line.strip()\r\n self.label_set.add(label)\r\n\r\n self.label_to_index = {label: idx for idx, label\r\n in enumerate(self.label_set)}\r\n self.index_to_label = {idx: label for idx, label\r\n in enumerate(self.label_set)}\r\n\r\n def read_data(self):\r\n with open(self.data_path, encoding='utf-8') as f:\r\n f.readline()\r\n for line in f:\r\n l = line.strip().split('\\t')\r\n\r\n token1, label1, offset1 = l[0], l[1], l[2]\r\n token2, label2, offset2 = l[3], l[4], l[5]\r\n relation, sentence = l[6], l[7]\r\n\r\n offset1 = eval(offset1)\r\n offset2 = eval(offset2)\r\n # print(token1,label1, token2,label2, sentence, offset1, offset2)\r\n\r\n # fixme: check the correctness of the token offset\r\n sentence_token = sentence[int(offset1[0]): int(offset1[1])].strip()\r\n if token1 != sentence_token:\r\n print(sentence)\r\n print(offset1)\r\n print(f'{token1}-{len(token1)}-{sentence_token}-{len(sentence_token)}')\r\n print()\r\n\r\n # 4-20 delete add [CLS] and [SEP] in the head and tail\r\n if self.load_denotation:\r\n if self.add_denotation_span:\r\n sentence = self.add_span_token(sentence, label1, offset1,\r\n label2, offset2)\r\n # data = f'[CLS]{sentence}[SEP]'\r\n data = f'{sentence}'\r\n else:\r\n # data = f'[CLS]{sentence}[SEP]' \\\r\n # f'{token1}[SEP]{label1}[SEP]' \\\r\n # f'{token2}[SEP]{label2}[SEP]'\r\n data = f'{sentence}[SEP]' \\\r\n f'{token1}[SEP]{label1}[SEP]' \\\r\n f'{token2}[SEP]{label2}'\r\n\r\n else:\r\n # data = f'[CLS]{sentence}[SEP]' \\\r\n # f'{token1}[SEP]' \\\r\n # f'{token2}[SEP]'\r\n data = f'{sentence}[SEP]' \\\r\n f'{token1}[SEP]' \\\r\n f'{token2}'\r\n\r\n self.data_statistics[relation] += 1\r\n self.data.append(data)\r\n self.label.append(relation)\r\n\r\n\r\n def print_example(self):\r\n logger.info('Positive Example:')\r\n for i in range(len(self.data)):\r\n if self.label[i] == 'CauseOf' or self.label[i] == 'ThemeOf':\r\n logger.info(f'{self.data[i]}\\t{self.label[i]}')\r\n break\r\n logger.info('Negative Example:')\r\n for i in range(len(self.data)):\r\n if self.label[i] == 'NoRelation':\r\n logger.info(f'{self.data[i]}\\t{self.label[i]}')\r\n break\r\n\r\n # todo: Anonymously\r\n\r\n\r\nif __name__ == '__main__':\r\n # pass\r\n\r\n # from src.config import config\r\n args = config()\r\n #\r\n train_dataset = RE_Dataset(args, 'train', )\r\n #\r\n print('Positive Example:')\r\n print(train_dataset[26])\r\n #\r\n # print()\r\n print('Negative Example:')\r\n for i in train_dataset:\r\n if i[-1] == 'NoRelation':\r\n print(i)\r\n break\r\n\r\n\r\n","repo_name":"YaoXinZhi/BERT-for-BioNLP-OST2019-AGAC-Task2","sub_path":"src/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"30461858163","text":"from __future__ import absolute_import\nimport json\nfrom datetime import datetime\nimport pytz\nimport logging \nimport os\nimport os.path as osp\nimport six\n\nclass Dict(dict):\n \"\"\"\n A dictionary that allows member access to its keys.\n \"\"\"\n\n def __init__(self, d):\n \"\"\"\n Updates itself with d.\n \"\"\"\n self.update(d)\n\n def __getattr__(self, item):\n 
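# --- Editor's note (added example) ---
# The add_span_token() helper in the RE_Dataset record above is easiest to see
# on a toy sentence. This standalone copy of its logic shows why the markers
# are inserted tail-first: the later span is patched before the earlier one,
# so the earlier character positions stay valid. The entity labels and offsets
# here are made up.
def add_span_token(sentence, label1, offset1, label2, offset2):
    offset_to_label = {offset1: label1, offset2: label2}
    first, second = sorted([offset1, offset2])
    chars = list(sentence)
    chars.insert(second[1], f"</{offset_to_label[second]}>")  # tail of the later span
    chars.insert(second[0], f"<{offset_to_label[second]}>")   # head of the later span
    chars.insert(first[1], f"</{offset_to_label[first]}>")    # tail of the earlier span
    chars.insert(first[0], f"<{offset_to_label[first]}>")     # head of the earlier span
    return "".join(chars)

print(add_span_token("AKT1 activates MTOR", "Gene", (0, 4), "Gene", (15, 19)))
# -> <Gene>AKT1</Gene> activates <Gene>MTOR</Gene>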
return self[item]\n\n    def __setattr__(self, item, value):\n        self[item] = value\n\n\ndef load_profiles():\n    \"\"\"\n    Load information on job profiles available to users.\n\n    :return: a dict keyed by profile id containing Dicts with profile info\n    \"\"\"\n    profs = json.load(open('etc/profiles.json'))\n    return {name:Dict(p) for name,p in six.iteritems(profs)}\n\ndef to_esmf(ts):\n    \"\"\"\n    Convert a UTC datetime into an ESMF string.\n\n    :param ts: the datetime object\n    :return: the date time in ESMF format\n    \"\"\"\n    return '%04d-%02d-%02d_%02d:%02d:%02d' % (ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second)\n\n\ndef to_utc(esmf):\n    \"\"\"\n    Parse and convert an ESMF datetime into a datetime in UTC.\n\n    :param esmf: the ESMF string YYYY-MM-DD_hh:mm:ss\n    :return: a datetime in the UTC timezone\n    \"\"\"\n    year, mon, day = int(esmf[0:4]), int(esmf[5:7]), int(esmf[8:10])\n    hour, min, sec = int(esmf[11:13]), int(esmf[14:16]), int(esmf[17:19])\n    return datetime(year, mon, day, hour, min, sec, tzinfo=pytz.utc)\n\ndef rm(paths):\n    \"\"\"\n    Try to remove a file.\n\n    :param paths: list of file paths\n    :return: 'OK', otherwise error\n    \"\"\"\n    for f in paths:\n        logging.debug('Deleting %s' % f)\n        try:\n            os.remove(f)\n            logging.info('Deleted %s' % f)\n        except OSError as err:\n            logging.error('Cannot delete %s: %s' % (f,err.strerror))\n\n\ndef load_sys_cfg():\n    import sys  # needed for sys.exit below; not among the module-level imports\n    # load the system configuration\n    sys_cfg = None\n    try:\n        sys_cfg = Dict(json.load(open('etc/conf.json')))\n    except IOError:\n        logging.critical('Cannot find system configuration, have you created etc/conf.json?')\n        sys.exit(2)\n    # set defaults (use a local name so the sys module is not shadowed)\n    install_path = sys_cfg.sys_install_path = sys_cfg.get('sys_install_path',os.getcwd())\n    sys_cfg.jobs_path = sys_cfg.get('jobs_path',osp.join(install_path,'jobs'))\n    sys_cfg.logs_path = sys_cfg.get('logs_path',osp.join(install_path,'logs'))\n    sys_cfg.sims_path = sys_cfg.get('sims_path',osp.join(install_path,'simulations'))\n\n    return sys_cfg\n\n","repo_name":"openwfm/wrfxctrl","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
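# --- Editor's note (added example) ---
# Quick round-trip sanity check for the to_esmf()/to_utc() helpers in the
# utils.py record above; the timestamp value is arbitrary, and the two
# conversions are re-stated inline so the snippet runs on its own.
from datetime import datetime
import pytz

ts = datetime(2021, 6, 1, 12, 30, 0, tzinfo=pytz.utc)
esmf = '%04d-%02d-%02d_%02d:%02d:%02d' % (ts.year, ts.month, ts.day,
                                          ts.hour, ts.minute, ts.second)
print(esmf)  # 2021-06-01_12:30:00
parsed = datetime(int(esmf[0:4]), int(esmf[5:7]), int(esmf[8:10]),
                  int(esmf[11:13]), int(esmf[14:16]), int(esmf[17:19]),
                  tzinfo=pytz.utc)
assert parsed == ts  # the fixed-width slicing matches the format string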
+{"seq_id":"30810347663","text":"from flask import Flask , current_app,g,request,make_response,render_template,redirect,abort,render_template_string#import the library (duplicate render_template import removed)\napp=Flask(__name__)#create the application instance\napp.config[\"DEBUG\"] = True\n\n\"In HTML templates, use {{variable_name}}\"\n@app.route('/current')\ndef index():\n    print(app)\n    print(current_app)#the current application instance\n    return 'index'\n\"\"\"Route registration, way one\"\"\"\n@app.route('/') #decorator; route matches the path after '/', and on a match runs the function below, e.g. www.baidu.com/index would run hello_world2\ndef hello_world():\n    #view function\n    return \"hello world\"\ndef hello_world2():\n    #view function\n    return \"hello world222\"\n\"\"\"Route registration, way two\"\"\"\napp.add_url_rule('/home','home',hello_world2)\n\n\n\"Parameters can be passed from the decorator into the function\"\n@app.route('/user/<page>')\ndef list_user(page):\n    return '您好,你是第{}位用户'.format(int(page)*10)\n\n\n\"\"\"The request object\"\"\"\n@app.route('/test/req')\ndef test_req():\n    \"get the GET request parameters\"\n    get_args=request.args\n    print(get_args)\n    page=request.args.get('page')#fetch a parameter sent with the request, by name\n    print(page)\n    \"other request information: ip, headers, etc.\"\n    headers=request.headers\n    print(headers)\n    ip=request.remote_addr\n    print('ip:',ip)\n\n    return \"test \"\n\n\"Request hooks intercept requests; there are also after_request/teardown_request\"\n@app.before_first_request#\"runs when the first request after server start arrives\"\ndef first_request():\n    print('first_request')\n@app.before_request\ndef every_request():\n    print('every_request')\n\n\n\"The response object\"\n@app.route('/test/html')\ndef test_response():\n    #build a response object\n    # resp=make_response('this is a response object',403,{'hahaha':'oooo'}) #the returned content can be customized\n\n    #respond with HTML\n    html=\"HTML文本显示\"#written inline, not recommended\n    htmls = render_template('kate.html')\n    resp=make_response(htmls,400)\n    return resp\n\n\"Redirects\"\n\n@app.route('/cdx')\ndef redirectss():\n    #filter by ip\n    ip_list=['0227.0.0.1']\n    ip=request.remote_addr\n    if ip in ip_list:\n        abort(403)#raises the given error code; together with forbidden_page below this implements a no-permission page\n\n    #redirect() sends the client to another page, abort() to an error\n    return redirect('/test/html')# visiting / leads to index\n\nprint(app.url_map)#inspect the registered routes\n\n@app.errorhandler(403)\ndef forbidden_page(err):\n    return \"您没有权利访问,请联系管理员开通权限\"\n\n@app.route('/kate')\ndef to_kate():\n    htmls = render_template('kate.html')\n    resp=make_response(htmls,400)\n    return resp\n\n\n@app.route('/moban')\ndef moban():\n    #return render_template_string(\"hello\")#renders a template string directly\n    age=8\n    name='帅哥'#substitute variables, or build a dict\n    user_info={'add':'温州'}\n    return render_template('kate.html',age=age,name=name,user_info=user_info)#renders the named html template automatically\n\n\"Template tags: template variables interpolate values in the html, while tags add logic (conditions, loops) in the html\"\n\n@app.route('/tag')\ndef tag():\n    var=1\n    return render_template('tag.html',var=var)\n\n\"\"\"\nUsing filters: |\nsafe: marks content as safe (disables escaping)\n\"\"\"\n@app.route('/filter')\ndef filter():\n    wecome='hello,luck'\n    phone='13712345672'\n    return render_template('filer.html',wecome=wecome,phone=phone)\n\n\"Custom filters\"\n\n@app.template_filter('phone_format')#the filter's name\ndef phone_format(phone):\n    \"mask the middle digits of a phone number\"\n    return phone[0:3]+'****'+phone[7:]\n\n\"Global parameters: written directly in the html\" \\\n\"Jumps with url_for\"\n\n@app.route('/url_ok')\ndef url_ok():\n\n    return render_template('url_ok.html')\n\n\n\"\"\"\nMacros are like Python functions: they can be written directly in an html file, or kept in a dedicated html file and imported from other templates.\nExample call: {% from \"macros.html\" import render_button %}\n\n\n\"\"\"\n\n\nif __name__ == '__main__':# first way to start the app, but not recommended\n    app.run()","repo_name":"yelushu/flask","sub_path":"1初步使用.py","file_name":"1初步使用.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"19086248808","text":"from math import sqrt\n\na, b, c = input(\"a, b, c = ? \\n\").split()\n\ntry:\n    a = float(a)\n    b = float(b)\n    c = float(c)\nexcept ValueError:\n    print(\"Input must be a number\")\n    exit()\n\nd = b**2 - 4*a*c\nif d > 0:\n    x1 = (-b + sqrt(d)) / (2 * a)\n    x2 = (-b - sqrt(d)) / (2 * a)\n    print(f\"x1 = {x1}, x2 = {x2}\")\nelif d == 0:\n    x1 = (-b + sqrt(d)) / (2 * a)\n    print(f\"x = {x1}\")\nelse:\n    print(\"No real roots\")\n\n\"\"\"\nTerminal> python quadratic_roots_input.py\na, b, c = ? 
\n1 0 -1\nx1 = 1.0, x2 = -1.0\n\nProcess finished with exit code 0\n\"\"\"","repo_name":"willidu/IN1900","sub_path":"Obliger/Uke 38/quadratic_roots_input.py","file_name":"quadratic_roots_input.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38749721179","text":"from sortedcontainers import *\n\nfrom bisect import *\nfrom collections import *\nfrom functools import *\nfrom heapq import *\nimport itertools\nfrom string import whitespace, ascii_lowercase, ascii_uppercase, ascii_letters, digits, hexdigits, octdigits, punctuation, printable\n\nfrom util import *\n\n\"\"\"\necho \"=== sample ===\" ; py day22.py < sample.in\necho \"=== real ===\" ; py day22.py < day22.in\necho \"=== sample ===\" ; py day22.py < sample.in ; echo \"=== real ===\" ; py day22.py < day22.in\n\"\"\"\n\nA = read_input('/dev/stdin')\n\ndef add_interval(ss, L, R):\n assert L <= R\n if L == R:\n return None\n idx = ss.bisect_left((L, R))\n while idx < len(ss):\n ival = ss[idx]\n if ival[0] > R:\n break\n R = max(R, ival[1])\n ss.pop(idx)\n if idx > 0:\n idx -= 1\n ival = ss[idx]\n if ival[1] >= L:\n L = min(L, ival[0])\n R = max(R, ival[1])\n ss.pop(idx)\n res = (L, R)\n ss.add(res)\n return res\n\ndef remove_interval(ss, L, R):\n assert L <= R\n if L == R:\n return\n added = add_interval(ss, L, R)\n r2 = added[1]\n ss.remove(added)\n if added[0] != L:\n ss.add((added[0], L))\n if R != r2:\n ss.add((R, r2))\n\nres = 0\n\nN = len(A)\n\nd = defaultdict(int)\n\nB = []\n\nfor line in A:\n a, b = line.split(' ')\n x, y, z = b.split(',')\n\n L = []\n for c in (x, y, z):\n lo, hi = map(int, c[2:].split('..'))\n # if lo < -50: lo = -50\n # if hi > 50: hi = 50\n L.append((lo, hi))\n if a == 'on':\n L.append(1)\n elif a == 'off':\n L.append(0)\n else:\n assert False\n B.append(L)\n\n\nz_events = defaultdict(list)\n\nfor x, y, z, a in B:\n z_events[z[0]].append(0)\n z_events[z[1] + 1].append(1)\n\n\n\ndef get_cells_on_at_z(desired_z):\n events = defaultdict(list)\n for x, y, z, a in B:\n if z[0] <= desired_z <= z[1]:\n xlo, xhi = x[0], x[1]\n ylo, yhi = y[0], y[1]\n events[ylo].append(0)\n events[yhi + 1].append(1)\n active = 0\n last_y = -99999999999\n res = 0\n for evt in sorted(events.keys()):\n ree = SortedSet()\n for x, y, z, a in B:\n if y[0] <= evt <= y[1] and z[0] <= desired_z <= z[1]:\n if a:\n add_interval(ree, x[0], x[1] + 1)\n else:\n remove_interval(ree, x[0], x[1] + 1)\n nactive = sum(b - a for a, b in ree)\n res += (evt - last_y) * active\n active = nactive\n last_y = evt\n assert active == 0\n return res\n\nres = 0\nactive = 0\nlast_z = -99999999999\nfor evt in sorted(z_events.keys()):\n grid = defaultdict(int)\n nactive = get_cells_on_at_z(evt)\n res += (evt - last_z) * active\n active = nactive\n last_z = evt\nassert active == 0\nprint(res)\n\n \n\n\n\n# for x in range(L[0][0], L[0][1] + 1):\n# for y in range(L[1][0], L[1][1] + 1):\n# for z in range(L[2][0], L[2][1] + 1):\n# if a == 'on':\n# d[(x, y, z)] = 1\n# elif a == 'off':\n# d[(x, y, z)] = 0\n# else:\n# assert False\n\n# for k in range(-50, 51):\n# res += d[(i, j, k)]\n\n# print(res)\n","repo_name":"blin00/advent-of-code","sub_path":"2021/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"81"} +{"seq_id":"70501368264","text":"import sys\r\nimport os\r\nimport pathlib\r\n\r\nsys.path.append(os.path.join(pathlib.Path(__file__).parent.parent, 
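# --- Editor's note (added example) ---
# The add_interval()/remove_interval() helpers in the day22.py record above
# maintain a SortedSet of disjoint half-open intervals. A tiny usage sketch
# (the concrete numbers are made up; assumes the two functions from the record
# are in scope):
from sortedcontainers import SortedSet

ss = SortedSet()
add_interval(ss, 0, 10)    # covers [0, 10)
add_interval(ss, 5, 15)    # overlaps, so it merges into [0, 15)
remove_interval(ss, 3, 6)  # punches a hole: [0, 3) and [6, 15)
print(ss, sum(b - a for a, b in ss))  # SortedSet([(0, 3), (6, 15)]) 12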
\"src\"))\r\n\r\nimport json\r\nimport dash\r\nimport dash_bootstrap_components as dbc\r\nfrom layout import Dashboard\r\n\r\napp = dash.Dash(\r\n \"SampleApp\",\r\n external_stylesheets=[\r\n dbc.themes.FLATLY,\r\n dbc.icons.FONT_AWESOME,\r\n \"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css\",\r\n \r\n ],\r\n external_scripts=[\r\n \"https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS-MML_SVG\",\r\n ],\r\n meta_tags=[\r\n {\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1.0\"}\r\n ],\r\n)\r\n\r\nconfigDir = os.path.join(pathlib.Path(__file__).parent.parent, \"config\")\r\n# Load config\r\nwith open(os.path.join(configDir, \"config.json\")) as configFile:\r\n config = json.load(configFile)\r\nwith open(os.path.join(configDir, config[\"secretsJson\"])) as secretsJson:\r\n config[\"secrets\"] = json.load(secretsJson)\r\n\r\n# Create the layout for the app\r\nappLayout = Dashboard(app=app, config=config)\r\nappLayout.setAppLayout()\r\napp.run_server(debug=True)\r\n","repo_name":"anujch144/Question-Mapping","sub_path":"sample_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34411246139","text":"from .Singleton import Singleton\nfrom datetime import datetime\nfrom colorama import Fore,Back, Style\n\nclass Logger(metaclass=Singleton):\n def __init__(self):\n self.logs = []\n\n def log(self, msg, type=1):\n ## Types\n # 1 : Information\n # 2 : Success\n # 3 : Failure\n # 4 : Data\n date = datetime.now().strftime('%Y %H:%M:%S')\n types = {\n 1: Fore.YELLOW,\n 2: Fore.GREEN,\n 3: Fore.RED,\n 4: Fore.MAGENTA\n }\n color = types.get(type)\n log = str(f'{date} *{color} {msg} {Style.RESET_ALL}')\n Logger().logs.append(log)\n print(log)\n\n","repo_name":"viktorholk/subreddit-scraper","sub_path":"helpers/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6907760690","text":"from math import *\nfrom utilities import *\nfrom reader import *\nfrom vector import *\nfrom matrix import *\nfrom textures import *\nfrom progress.bar import Bar\n\nBLACK = color(0, 0, 0)\nWHITE = color(255, 255, 255)\nRED = color(255, 0, 0)\nBLUE = color(0, 0, 255)\nGREEN = color(0, 255, 0)\n\nsize = 0\n\n\nclass Render(object):\n def __init__(self, width, height):\n self.width = width # ancho de la ventana\n self.height = height # ancho de la ventana\n self.current_color = WHITE # color default de la ventana\n self.clear_color = BLACK # color para hacerle clear a la ventana\n self.viewX = 0\n self.viewY = 0\n self.viewWidth = 0\n self.viewHeight = 0\n self.copiaZ = []\n self.texture = None\n self.shader = None\n self.vertex_array = []\n self.vertex_buffer_object = []\n self.clear()\n self.Model = None\n self.View = None\n self.light = V3(0, 0, 1)\n # self.Projection = None\n\n def loadModelMatrix(self, translate=(0, 0, 0), scale=(1, 1, 1), rotate=(0, 0, 0)):\n translate = V3(*translate)\n scale = V3(*scale)\n rotate = V3(*rotate)\n\n translation_matrix = matriz([\n [1, 0, 0, translate.x],\n [0, 1, 0, translate.y],\n [0, 0, 1, translate.z],\n [0, 0, 0, 1]\n ])\n\n scale_matrix = matriz([\n [scale.x, 0, 0, 0],\n [0, scale.y, 0, 0],\n [0, 0, scale.z, 0],\n [0, 0, 0, 1]\n ])\n\n a = rotate.x\n rotation_x = matriz([\n [1, 0, 0, 0],\n [0, cos(a), -sin(a), 0],\n [0, sin(a), cos(a), 
0],\n [0, 0, 0, 1]\n ])\n\n a = rotate.y\n rotation_y = matriz([\n [cos(a), 0, sin(a), 0],\n [0, 1, 0, 0],\n [-sin(a), 0, cos(a), 0],\n [0, 0, 0, 1]\n ])\n\n a = rotate.z\n rotation_z = matriz([\n [cos(a), -sin(a), 0, 0],\n [sin(a), cos(a), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ])\n\n rotation_matrix = rotation_x * rotation_y * rotation_z\n\n self.Model = translation_matrix * rotation_matrix * scale_matrix\n\n def loadViewMatrix(self, x, y, z, center):\n\n Mi = matriz([\n [x.x, x.y, x.z, 0],\n [y.x, y.y, y.z, 0],\n [z.x, z.y, z.z, 0],\n [0, 0, 0, 1],\n ])\n\n Op = matriz([\n [1, 0, 0, -center.x],\n [0, 1, 0, -center.y],\n [0, 0, 1, -center.z],\n [0, 0, 0, 1]\n ])\n\n self.View = Mi * Op\n\n def loadProjectionViewMatrix(self, eyes, center):\n coeff = -1/(eyes.length() - center.length())\n self.Projection = matriz([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, coeff, 1]\n ])\n\n def loadViewPortMatrix(self, width=0, height=0):\n x = 0\n y = 0\n\n if width != 0:\n w = width\n else:\n w = self.width/2\n\n if height != 0:\n h = height\n else:\n h = self.height/2\n\n self.Viewport = matriz([\n [w, 0, 0, x + w],\n [0, h, 0, y + h],\n [0, 0, 128, 128],\n [0, 0, 0, 1]\n ])\n\n def lookAt(self, eyes, center, up):\n\n z = (eyes-center).normalize()\n x = (up * z).normalize()\n y = (z * x).normalize()\n\n self.loadViewMatrix(x, y, z, center)\n self.loadProjectionViewMatrix(eyes, center)\n self.loadViewPortMatrix()\n\n def clamping(self, num):\n return int(max(min(num, 255), 0))\n\n def clear(self):\n self.framebuffer = [\n [self.clear_color for x in range(self.width)]\n for y in range(self.height)\n ]\n self.zBufferClear = [\n [self.clear_color for x in range(self.width)]\n for y in range(self.height)\n ]\n self.zBuffer = [\n [-9999 for x in range(self.width)]\n for y in range(self.height)\n ]\n\n def write(self, filename):\n f = open(filename, 'bw')\n\n # pixel header\n f.write(char('B'))\n f.write(char('M'))\n f.write(dword(14 + 40 + self.width * self.height * 3))\n f.write(word(0))\n f.write(word(0))\n f.write(dword(14 + 40))\n\n # info header\n f.write(dword(40))\n f.write(dword(self.width))\n f.write(dword(self.height))\n f.write(word(1))\n f.write(word(24))\n f.write(dword(0))\n f.write(dword(self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n\n # pixel data\n for y in range(self.height):\n for x in range(self.width):\n f.write(self.framebuffer[y][x])\n\n f.close()\n\n def write2(self, filename):\n f = open(filename, 'bw')\n\n # pixel header\n f.write(char('B'))\n f.write(char('M'))\n f.write(dword(14 + 40 + self.width * self.height * 3))\n f.write(word(0))\n f.write(word(0))\n f.write(dword(14 + 40))\n\n # info header\n f.write(dword(40))\n f.write(dword(self.width))\n f.write(dword(self.height))\n f.write(word(1))\n f.write(word(24))\n f.write(dword(0))\n f.write(dword(self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n\n # pixel data\n for x in range(self.height):\n for y in range(self.width):\n f.write(self.zBufferClear[y][x])\n\n f.close()\n\n def point(self, x, y):\n if(0 < x < self.width and 0 < y < self.height):\n self.framebuffer[x][y] = self.current_color\n\n def line(self, v1, v2):\n x0 = round(v1.x)\n y0 = round(v1.y)\n x1 = round(v2.x)\n y1 = round(v2.y)\n\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n # Si es empinado, poco movimiento en x y mucho en y.\n steep = dy > dx\n\n # Se invierte si es empinado\n if steep:\n x0, y0 = y0, x0\n x1, y1 = y1, 
x1\n\n # Si la linea tiene direccion contraria, invertir\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n offset = 0\n threshold = dx\n y = y0\n\n for x in range(x0, x1 + 1):\n if steep:\n self.point(x, y)\n else:\n self.point(y, x)\n\n offset += dy * 2\n\n if offset >= threshold:\n y += 1 if y0 < y1 else -1\n\n threshold += dx * 2\n\n def cordsFinales(self, x, y):\n viewX = self.viewX\n viewY = self.viewY\n ancho = self.viewWidth\n alto = self.viewHeight\n\n medioX = round(viewX + (ancho/2))\n medioY = round(viewY + (alto/2))\n\n x = medioX + round(x * (ancho / 2))\n y = medioY + round(y * (alto / 2))\n\n return [x, y]\n\n def transform_vertex(self, vertex):\n augmented_vertex = matriz([\n [vertex[0]],\n [vertex[1]],\n [vertex[2]],\n [1]\n ])\n\n if(self.View and self.Projection):\n transformed_vertex = (self.Viewport * self.Projection *\n self.View * self.Model * augmented_vertex).matriz\n else:\n transformed_vertex = (self.Model * augmented_vertex).matriz\n\n return V3(\n transformed_vertex[0][0] / transformed_vertex[3][0],\n transformed_vertex[1][0] / transformed_vertex[3][0],\n transformed_vertex[2][0] / transformed_vertex[3][0],\n )\n\n def generate_obj(self, modelo, translate=(0, 0, 0), scale=(1, 1, 1), rotate=(0, 0, 0)):\n\n self.loadModelMatrix(translate, scale, rotate)\n model = Obj(modelo)\n global size\n size = len(model.faces)\n bar = Bar('Generate Obj', max=len(model.faces))\n for face in model.faces:\n\n if len(face) == 4:\n f1 = face[0][0] - 1\n f2 = face[1][0] - 1\n f3 = face[2][0] - 1\n f4 = face[3][0] - 1\n\n v1 = self.transform_vertex(model.vertices[f1])\n v2 = self.transform_vertex(model.vertices[f2])\n v3 = self.transform_vertex(model.vertices[f3])\n v4 = self.transform_vertex(model.vertices[f4])\n\n if self.texture and len(model.tvertices) != 0:\n ft1 = face[0][1] - 1\n ft2 = face[1][1] - 1\n ft3 = face[2][1] - 1\n ft4 = face[3][1] - 1\n\n vt1 = V3(*model.tvertices[ft1])\n vt2 = V3(*model.tvertices[ft2])\n vt3 = V3(*model.tvertices[ft3])\n vt4 = V3(*model.tvertices[ft4])\n\n self.vertex_buffer_object.append(v1)\n self.vertex_buffer_object.append(v2)\n self.vertex_buffer_object.append(v3)\n\n self.vertex_buffer_object.append(vt1)\n self.vertex_buffer_object.append(vt2)\n self.vertex_buffer_object.append(vt3)\n\n if self.shader:\n\n fn1 = face[0][2] - 1\n fn2 = face[1][2] - 1\n fn3 = face[2][2] - 1\n\n vn1 = V3(*model.nvertices[fn1])\n vn2 = V3(*model.nvertices[fn2])\n vn3 = V3(*model.nvertices[fn3])\n\n self.vertex_buffer_object.append(vn1)\n self.vertex_buffer_object.append(vn2)\n self.vertex_buffer_object.append(vn3)\n\n self.vertex_buffer_object.append(v1)\n self.vertex_buffer_object.append(v3)\n self.vertex_buffer_object.append(v4)\n\n self.vertex_buffer_object.append(vt1)\n self.vertex_buffer_object.append(vt3)\n self.vertex_buffer_object.append(vt4)\n\n if self.shader:\n fn1 = face[0][2] - 1\n fn3 = face[2][2] - 1\n fn4 = face[3][2] - 1\n\n vn1 = V3(*model.nvertices[fn1])\n vn3 = V3(*model.nvertices[fn3])\n vn4 = V3(*model.nvertices[fn4])\n\n self.vertex_buffer_object.append(vn1)\n self.vertex_buffer_object.append(vn3)\n self.vertex_buffer_object.append(vn4)\n\n else:\n self.vertex_buffer_object.append(v1)\n self.vertex_buffer_object.append(v2)\n self.vertex_buffer_object.append(v3)\n\n if self.shader:\n fn1 = face[0][2] - 1\n fn2 = face[1][2] - 1\n fn3 = face[2][2] - 1\n fn4 = face[3][2] - 1\n\n vn1 = V3(*model.nvertices[fn1])\n vn2 = V3(*model.nvertices[fn2])\n vn3 = V3(*model.nvertices[fn3])\n vn4 = 
V3(*model.nvertices[fn4])\n\n self.vertex_buffer_object.append(vn1)\n self.vertex_buffer_object.append(vn2)\n self.vertex_buffer_object.append(vn3)\n\n self.vertex_buffer_object.append(v1)\n self.vertex_buffer_object.append(v3)\n self.vertex_buffer_object.append(v4)\n\n if self.shader:\n fn1 = face[0][2] - 1\n fn2 = face[1][2] - 1\n fn3 = face[2][2] - 1\n fn4 = face[3][2] - 1\n\n vn1 = V3(*model.nvertices[fn1])\n vn2 = V3(*model.nvertices[fn2])\n vn3 = V3(*model.nvertices[fn3])\n vn4 = V3(*model.nvertices[fn4])\n\n self.vertex_buffer_object.append(vn1)\n self.vertex_buffer_object.append(vn3)\n self.vertex_buffer_object.append(vn4)\n\n if len(face) == 3 and len(model.tvertices) != 0:\n f1 = face[0][0] - 1\n f2 = face[1][0] - 1\n f3 = face[2][0] - 1\n\n v1 = self.transform_vertex(model.vertices[f1])\n v2 = self.transform_vertex(model.vertices[f2])\n v3 = self.transform_vertex(model.vertices[f3])\n\n self.vertex_buffer_object.append(v1)\n self.vertex_buffer_object.append(v2)\n self.vertex_buffer_object.append(v3)\n\n if self.texture:\n ft1 = face[0][1] - 1\n ft2 = face[1][1] - 1\n ft3 = face[2][1] - 1\n\n vt1 = V3(*model.tvertices[ft1])\n vt2 = V3(*model.tvertices[ft2])\n vt3 = V3(*model.tvertices[ft3])\n\n self.vertex_buffer_object.append(vt1)\n self.vertex_buffer_object.append(vt2)\n self.vertex_buffer_object.append(vt3)\n\n fn1 = face[0][2] - 1\n fn2 = face[1][2] - 1\n fn3 = face[2][2] - 1\n\n vn1 = V3(*model.nvertices[fn1])\n vn2 = V3(*model.nvertices[fn2])\n vn3 = V3(*model.nvertices[fn3])\n\n self.vertex_buffer_object.append(vn1)\n self.vertex_buffer_object.append(vn2)\n self.vertex_buffer_object.append(vn3)\n\n bar.next()\n bar.finish()\n\n def draw(self, polygon):\n self.vertex_array = iter(self.vertex_buffer_object)\n global size\n bar = Bar('Draw', max=size)\n\n if polygon == 'TRIANGLES':\n try:\n while True:\n self.triangle2Vectors()\n bar.next()\n except StopIteration:\n print('Done.')\n bar.finish()\n if polygon == 'WIREFRAME':\n try:\n while True:\n self.triangle_wireframe()\n except StopIteration:\n print(\" done . 
\")\n\n def generateVectors(self, model):\n\n t = Texture('./models/' + model + '.bmp')\n self.framebuffer = t.pixels\n self.texture = Texture('./models/' + model + '.bmp')\n self.current_color = color(255, 255, 255)\n\n model = Obj('./models/' + model + '.obj')\n\n for face in model.faces:\n\n if len(face) == 4:\n ft1 = face[0][1] - 1\n ft2 = face[1][1] - 1\n ft3 = face[2][1] - 1\n ft4 = face[3][1] - 1\n\n vt1 = V3(\n model.tvertices[ft1][0] * t.width,\n model.tvertices[ft1][1] * t.width\n )\n vt2 = V3(\n model.tvertices[ft2][0] * t.width,\n model.tvertices[ft2][1] * t.width\n )\n vt3 = V3(\n model.tvertices[ft3][0] * t.width,\n model.tvertices[ft3][1] * t.width\n )\n vt4 = V3(\n model.tvertices[ft4][0] * t.width,\n model.tvertices[ft4][1] * t.width\n )\n\n self.line(vt1, vt2)\n self.line(vt2, vt3)\n self.line(vt3, vt4)\n self.line(vt4, vt1)\n\n if len(face) == 3:\n ft1 = face[0][1] - 1\n ft2 = face[1][1] - 1\n ft3 = face[2][1] - 1\n\n vt1 = V3(\n model.tvertices[ft1][0] * t.width,\n model.tvertices[ft1][1] * t.width\n )\n vt2 = V3(\n model.tvertices[ft2][0] * t.width,\n model.tvertices[ft2][1] * t.width\n )\n vt3 = V3(\n model.tvertices[ft3][0] * t.width,\n model.tvertices[ft3][1] * t.width\n )\n\n self.line(vt1, vt2)\n self.line(vt2, vt3)\n self.line(vt3, vt1)\n\n def prodPunto(self, v1, v2):\n return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z\n\n def triangle_wireframe(self):\n A = next(self.vertex_array)\n B = next(self.vertex_array)\n C = next(self.vertex_array)\n\n if self.texture:\n tA = next(self.vertex_array)\n tB = next(self.vertex_array)\n tC = next(self.vertex_array)\n\n self.line(A, B)\n self.line(B, C)\n self.line(C, A)\n\n def triangle2Vectors(self):\n\n A = next(self.vertex_array)\n B = next(self.vertex_array)\n C = next(self.vertex_array)\n\n if self.texture:\n tA = next(self.vertex_array)\n tB = next(self.vertex_array)\n tC = next(self.vertex_array)\n\n nA = next(self.vertex_array)\n nB = next(self.vertex_array)\n nC = next(self.vertex_array)\n\n Bmin, Bmax = bounding_box(A, B, C)\n Bmin.round()\n Bmax.round()\n\n for x in range((Bmin.x), Bmax.x + 1):\n for y in range(Bmin.y, Bmax.y + 1):\n w, v, u = barycentric(A, B, C, V3(x, y))\n\n if (w < 0 or v < 0 or u < 0):\n continue\n\n z = A.z * w + B.z * v + C.z * u\n depth = z/self.width\n try:\n if (self.zBuffer[x][y] < z):\n self.zBuffer[x][y] = z\n self.zBufferClear[x][y] = color(\n self.clamping(depth*255),\n self.clamping(depth*255),\n self.clamping(depth*255)\n )\n\n self.current_color = self.shader(\n self,\n bar=(w, u, v),\n vertices=(A, B, C),\n texture_coords=(tA, tB, tC),\n normals=(nA, nB, nC),\n light=self.light,\n\n )\n self.point(y, x)\n except:\n continue\n\n def cube(self, vertices, tvertices=()):\n A, B, C, D = vertices\n\n if self.texture and tvertices != 0:\n tA, tB, tC, tD = tvertices\n\n self.triangle2Vectors((A, B, C), (tA, tB, tC))\n self.triangle2Vectors((A, C, D), (tA, tC, tD))\n else:\n self.triangle2Vectors((A, B, C))\n self.triangle2Vectors((A, C, D))\n","repo_name":"javim7/SR-ComputerGraphics","sub_path":"Proyect1-SoftwareRenderer/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":18798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24382103002","text":"# Problem: We have 100 doors are closed\n# In the first iteration will open all doors\n# from the second iteration we need to close doors from the second door like: 2, 4, 6, etc.\n# in the third iteration we need to change the door state from the third door like 3, 6, 
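# The renderer above leans on bounding_box() and barycentric() helpers that this
# record does not show. A minimal sketch of the usual formulation, assuming V3
# exposes .x/.y and accepts V3(x, y) as the calls above imply -- an
# illustration, not the original file's code:
def bounding_box(*points):
    xs, ys = [p.x for p in points], [p.y for p in points]
    return V3(min(xs), min(ys)), V3(max(xs), max(ys))

def barycentric(A, B, C, P):
    # Solve P - A = v*(B - A) + u*(C - A) with Cramer's rule.
    det = (B.x - A.x) * (C.y - A.y) - (C.x - A.x) * (B.y - A.y)
    if det == 0:
        return -1, -1, -1  # degenerate triangle: every pixel gets rejected
    v = ((P.x - A.x) * (C.y - A.y) - (C.x - A.x) * (P.y - A.y)) / det
    u = ((B.x - A.x) * (P.y - A.y) - (P.x - A.x) * (B.y - A.y)) / det
    return 1 - v - u, v, u  # (w, v, u), matching `w, v, u = barycentric(...)` above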
9, etc.\n# at the final iteration we need to count how many doors remain open\n\n\ndef count_open_doors():\n    doors = [False] * 101\n    for i in range(1, 101):\n        for j in range(i, 101, i):\n            doors[j] = not doors[j]\n        if doors[i]:\n            print(\"Door number\", i, \"is open\")\n    print(\"Total number of open doors:\", doors.count(True))\n\n\ncount_open_doors()\n","repo_name":"unxusr/problem_solving","sub_path":"doors.py","file_name":"doors.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20697416233","text":"import json\nimport os\nimport re\nimport sys\n\n\ndef get_config(url) -> dict:\n    \"\"\"Return the config entry for the site whose base URL matches url.\"\"\"\n    # basic setup\n    BASE_PATH = os.path.dirname(os.path.realpath(__file__))\n\n    with open(BASE_PATH+\"/config.json\") as json_data_file:\n        data = json.load(json_data_file)\n\n    # retrieve all supported keys\n\n    default = data[\"defaults\"]\n    base_url_regex = data[\"base_url_regex\"]\n    if re.match(base_url_regex, url):\n        found_url = re.match(base_url_regex, url).group(0)\n    else:\n        sys.exit(\"URL not recognized!\")\n\n    key = \"\"\n    for k in default:\n        if default[k][\"url\"] == found_url:\n            key = default[k][\"key\"]\n    # check for a missing key *before* indexing into data\n    if key == \"\":\n        sys.exit(f\"Key for {found_url} not found!\")\n    return data[key]\n\n\n","repo_name":"tchr-dev/novel-downloader-2","sub_path":"dwnldr/config_reader.py","file_name":"config_reader.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39834506714","text":"def calc(cur_word, word):\n    diff_cnt = 0\n    for c, w in zip(cur_word, word):\n        if c != w: diff_cnt += 1\n    if diff_cnt == 1:\n        return True\n    else:\n        return False\n\n\ndef solution(begin, target, words):\n    if target not in words:\n        return 0\n    visited = [0 for _ in range(len(words))]\n    # BFS over (word, depth) pairs: the depth at which the target is first\n    # popped is the minimum number of one-letter conversions.\n    path = [(begin, 0)]\n\n    while path:\n        cur_word, depth = path.pop(0)\n        if cur_word == target:\n            return depth\n        for idx, word in enumerate(words):\n            if not visited[idx] and calc(cur_word, word):\n                visited[idx] = 1\n                path.append((word, depth + 1))\n\n    return 0\n","repo_name":"devhyojin/Algorithm","sub_path":"Programmers/[Programmers]코딩테스트고득점Kit_DFSBFS_단어 변환.py","file_name":"[Programmers]코딩테스트고득점Kit_DFSBFS_단어 변환.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"72266316744","text":"from django.contrib.auth.models import User\nfrom django.contrib.gis.geos import GEOSGeometry\nimport pyproj\nfrom rest_framework import serializers\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer\nfrom rest_api.models import Spot, Tip, Review, Photo, Favorite, ParkingLocation\nfrom rest_api.tools import parse_float_list, METERS_PER_MILE\n\n\nclass DynamicFieldsSerializerMixin(object):\n    \"\"\"\n    Used to add fields argument to serializers at point of instantiation.\n    These are the fields to be returned on a GET request.\n    Fields options include those specified under the serializer's meta class.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        # Don't pass the 'fields' arg up to the superclass\n        fields = kwargs.pop('fields', None)\n\n        # Instantiate the superclass normally\n        super(DynamicFieldsSerializerMixin, self).__init__(*args, **kwargs)\n\n        if fields is not None:\n            # Drop any fields that are not specified in the `fields` argument.\n            allowed = set(fields)\n            existing = set(self.fields.keys())\n            for field_name in existing - allowed:\n                
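# Only doors whose number is a perfect square end up open (they have an odd
# number of divisors), so the program above should report 10 open doors. A
# quick self-contained check of that closed-form answer:
open_doors = [i for i in range(1, 101) if int(i ** 0.5) ** 2 == i]
assert open_doors == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]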
self.fields.pop(field_name)\n\n\nclass UserSerializer(DynamicFieldsSerializerMixin, serializers.HyperlinkedModelSerializer):\n\n    password = serializers.CharField(\n        style={'input_type': 'password'}, write_only=True\n    )\n\n    class Meta:\n        model = User\n        fields = (\n            'id',\n            'username',\n            'password',\n            'email',\n            'first_name',\n            'last_name'\n        )\n\n\n    def create(self, validated_data):\n        user = User.objects.create(\n            username=validated_data['username'],\n            email=validated_data['email'],\n            first_name=validated_data['first_name'],\n            last_name=validated_data['last_name']\n        )\n\n        user.set_password(validated_data['password'])\n        user.save()\n\n        return user\n\n\nclass TipSerializer(DynamicFieldsSerializerMixin, serializers.HyperlinkedModelSerializer):\n\n    spot = serializers.PrimaryKeyRelatedField(read_only=True)\n    author = serializers.PrimaryKeyRelatedField(read_only=True)\n\n    class Meta:\n        model = Tip\n        fields = (\n            \"id\",\n            \"spot\",\n            \"tip\",\n            \"created_at\",\n            \"author\",\n        )\n        read_only_fields = (\n            \"id\",\n            \"spot\",\n            \"created_at\",\n            \"author\",\n        )\n\n\nclass ReviewSerializer(DynamicFieldsSerializerMixin, serializers.HyperlinkedModelSerializer):\n\n    spot = serializers.PrimaryKeyRelatedField(read_only=True)\n    author = serializers.PrimaryKeyRelatedField(read_only=True)\n\n    class Meta:\n        model = Review\n        fields = (\n            \"id\",\n            \"spot\",\n            \"rating\",\n            \"review\",\n            \"created_at\",\n            \"author\"\n        )\n        read_only_fields = (\n            \"id\",\n            \"spot\",\n            \"created_at\",\n            \"author\",\n        )\n\n\nclass ParkingLocationSerializer(DynamicFieldsSerializerMixin, GeoFeatureModelSerializer):\n\n    spot = serializers.PrimaryKeyRelatedField(read_only=True)\n\n    class Meta:\n        model = ParkingLocation\n        geo_field = \"location\"\n        fields = (\n            \"id\",\n            \"spot\",\n            \"location\",\n            \"description\",\n            \"created_at\",\n        )\n        read_only_fields = (\n            \"id\",\n            \"spot\",\n            \"created_at\",\n        )\n\n\n\nclass SpotSerializer(DynamicFieldsSerializerMixin, GeoFeatureModelSerializer):\n\n    tips = TipSerializer(many=True, read_only=True, fields=(\"tip\", \"created_at\"))\n\n    reviews = ReviewSerializer(many=True, read_only=True, fields=(\"rating\", \"review\", \"created_at\"))\n\n    additional_photos = serializers.HyperlinkedRelatedField(\n        many=True,\n        read_only=True,\n        view_name=\"photo-detail\",\n        lookup_field=\"pk\"\n    )\n\n    parking_locations = ParkingLocationSerializer(many=True, read_only=True)\n\n    distance = serializers.SerializerMethodField()\n\n    def get_distance(self, obj):\n\n        curr_loc_str = self.context[\"request\"].query_params.get(\"curr_loc\", None)\n        curr_loc_lst = parse_float_list(curr_loc_str)\n\n        if len(curr_loc_lst) == 2:\n            geod = pyproj.Geod(ellps='WGS84')\n            angle1, angle2, distance = geod.inv(curr_loc_lst[0], curr_loc_lst[1], obj.location.x, obj.location.y)\n            return distance/METERS_PER_MILE\n\n        return None\n\n\n    class Meta:\n        model = Spot\n        geo_field = \"location\"\n        fields = (\n            \"id\",\n            \"title\",\n            \"description\",\n            \"family_safe\",\n            \"skill_level\",\n            \"danger_level\",\n            \"feature_types\",\n            \"activity_types\",\n            \"primary_photo\",\n            \"additional_photos\",\n            \"tips\",\n            \"reviews\",\n            \"parking_locations\",\n            \"distance\",\n        )\n\n\nclass PhotoSerializer(DynamicFieldsSerializerMixin, serializers.HyperlinkedModelSerializer):\n\n    spot = serializers.PrimaryKeyRelatedField(read_only=True)\n\n    class Meta:\n        model = Photo\n        fields = (\n            \"id\",\n            \"photo\",\n            \"created_at\",\n            \"spot\",\n        )\n        read_only_fields = (\n            \"id\",\n            \"spot\",\n            \"created_at\",\n        )\n\nclass FavoriteSerializer(DynamicFieldsSerializerMixin, 
serializers.HyperlinkedModelSerializer):\n\n    spot = SpotSerializer(read_only=True)\n    user = UserSerializer(read_only=True)\n\n    class Meta:\n        model = Favorite\n        fields = (\n            \"id\",\n            \"user\",\n            \"spot\",\n            \"created_at\",\n        )\n        read_only_fields = (\n            \"id\",\n            \"user\",\n            \"spot\",\n            \"created_at\",\n        )\n\n","repo_name":"malerba118/nature_spotz_rest_api","sub_path":"rest_api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14349148125","text":"from src.pf_console.objects import pf_console as pfc\n\n'''\n\nThe PFPY_Console is used in development as an intermediate step toward the GUI application,\nto establish the back-end design and functionality before the added complexity of GUI implementation.\nThe Console is intended to be a functional program for real-life application.\n\nThe script gathers a variety of input from the user, including the bank from which the statement\nis being downloaded, the statement file location and the statement date. The program uses this\ninformation to map the transaction information into the standardized Transaction format. The new\nTransactions are stored in the heap in a pandas.DataFrame object.\n\nA CSV file of all historical transactions is stored on the hard drive. All historical transactions\nare loaded into a DataFrame and kept on the heap.\n\nIf a historical Transaction with the same transaction_id is not found, the transaction information\nis used to initialize a BudgetLine object. Once the user completes the BudgetLine transaction object, it\n(or they, if the transaction is split into multiple BudgetLine transactions) is loaded into the\nhistorical BudgetLine transaction file and the historical Transaction file is written to include\nthe new transaction.\n\n'''\nSAVED_TRANSACTIONS_FILE = '/Users/johnmatthew/Documents/6. Personal Finance/0. PersonalFinancePY/TRANSACTIONS_PersonalFinancePY.csv'\nSAVED_BUDGET_LINES_FILE = '/Users/johnmatthew/Documents/6. Personal Finance/0. PersonalFinancePY/BUDGET_LINES_PersonalFinancePY.csv'\nSTATEMENT_FILE = '/Users/johnmatthew/Documents/6. Personal Finance/3. Credit Card Statements/AMEX Gold Card 63002/2023/9. 
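# A minimal sketch of how the DynamicFieldsSerializerMixin above is meant to be
# used: the `fields` kwarg at instantiation trims the rendered output, exactly
# as SpotSerializer does with its nested TipSerializer. `tip` is a hypothetical
# Tip instance:
serializer = TipSerializer(tip, fields=("tip", "created_at"))
data = serializer.data  # only the two requested fields are rendered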
2023-09-01.csv'\n\n# STATEMENT_FILE = fd.askopenfilename()\n\nc = pfc.PFConsole(SAVED_TRANSACTIONS_FILE, SAVED_BUDGET_LINES_FILE, STATEMENT_FILE)\nc.run()\n\nprint('ALL TRANSACTIONS IMPORTED.')\n","repo_name":"johnolszewskim/personal-finance-py","sub_path":"src/pf_console/main_pf_console.py","file_name":"main_pf_console.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71647782026","text":"class company:\n # pass\n increment = 1.5\n\n def __init__(self, firstName, lastName, Salary):\n self.firstname = firstName\n self.lastname = lastName\n self.salary = Salary\n\n def increase(self):\n self.salary = int(self.salary * company.increment)\n\n\nsaurabh = company(\"saurabh\", \"kandekar\", 44000)\nshankar = company(\"shankar\", \"kandekar\", 45000)\n\nsaurabh.increase()\n\nprint(saurabh.__dict__)\nsaurabh.increment = \"9\"\nprint(saurabh.__dict__)\n","repo_name":"AzharMomin07/Python","sub_path":"Python-main/OOPS/three.py","file_name":"three.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14607450088","text":"\"\"\"Given the head of a linked list and an integer val, remove all the nodes of\nthe linked list that has Node.val == val, and return the new head.\"\"\"\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def printList(self):\n temp = self.head\n while temp:\n print(temp.data, end=\" \")\n temp = temp.next\n\n def push(self, new_data):\n new_node = Node(new_data)\n if self.head is None:\n self.head = new_node\n return\n last = self.head\n while last.next:\n last = last.next\n last.next = new_node\n\n\ndef removeElement(head, val):\n dummy_head = Node(-1)\n dummy_head.next = head\n\n curr_node = dummy_head\n while curr_node.next != None:\n if curr_node.next.data == val:\n curr_node.next = curr_node.next.next\n else:\n curr_node = curr_node.next\n return dummy_head.next\n\nll = LinkedList()\nll2 = LinkedList()\nfor i in range(int(input(\"Enter the number of elements: \"))):\n ll.push(int(input()))\n\nll2.head = removeElement(ll.head, 4)\nll2.printList()\n","repo_name":"mmrraju/Coding-interview-preparation","sub_path":"Linked Lists/04_Remove_linkedList_elements.py","file_name":"04_Remove_linkedList_elements.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69905755787","text":"#!/usr/bin/env python\n#-*- coding: utf8 -*-\n\nimport sys\nimport os\nimport re\nimport csv\nimport codecs\nimport xmltodict\n\nfolder = sys.argv[1]\n\ndef walk_files_folder(folder):\n\tfilenames = []\n\tfor root, dirs, files in os.walk(folder):\n\t\tfor filename in files :\n\t\t\tif filename != \".DS_Store\":\n\t\t\t\tfilenames.append(os.path.join(root, filename))\n\treturn filenames\n\ndef encode(champs):\n\tcheck = \"\"\n\tif champs == None:\n\t\tcheck = \"\"\n\telse:\n\t\tcheck = champs\n\treturn check.encode('utf8')\n\n\nfor root, dirs, files in os.walk(folder):\n\n\tif \"stock_assoc\" in root.split('/')[-1]:\n\t\toutput = csv.writer(open('output/' + root.split('/')[-1] + '.csv', 'w'))\n\n\tif len(files) > 1:\n\t\tfor file in files:#walk_files_folder(folder):\n\t\t\tif \".DS_Store\" not in file:\n\t\t\t\tannonce_ref = xmltodict.parse(codecs.open(os.path.join(root, file), 'r', 
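# In the company example above, `saurabh.increment = "9"` does not touch the
# class attribute; it creates an instance attribute that shadows it, which is
# why the second __dict__ print differs. A stripped-down illustration:
class Shadowed:
    rate = 1.5

obj = Shadowed()
obj.rate = 9  # instance attribute, visible only in obj.__dict__
assert Shadowed.rate == 1.5 and obj.rate == 9
assert "rate" in obj.__dict__ and "rate" not in Shadowed().__dict__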
encoding='ISO-8859-1').read())[\"ANNONCE_REF\"]\n\n\t\t\t\tnum_annonce = encode(annonce_ref[\"@numannonce\"])\n\t\t\t\tdate_declaration = encode(annonce_ref[\"@datedeclaration\"])\n\t\t\t\tdept = encode(annonce_ref[\"@dept\"])\n\t\t\t\tcp = encode(annonce_ref[\"@cp\"])\n\t\t\t\ttype_annonce = encode(annonce_ref[\"TYPE\"][\"@code\"])\n\t\t\t\tlieu_declaration = encode(annonce_ref[\"LIEU_DECLARATION\"])\n\t\t\t\tancien_titre = encode(annonce_ref[\"ANCIEN_TITRE\"])\n\t\t\t\tnouveau_titre = encode(annonce_ref[\"NOUVEAU_TITRE\"])\n\t\t\t\tadditif_objet = encode(annonce_ref[\"ADDITIF_OBJET\"])\n\t\t\t\tnouvel_objet = encode(annonce_ref[\"NOUVEL_OBJET\"])\n\t\t\t\tobjet = encode(annonce_ref[\"OBJET\"])\n\t\t\t\ttitre = annonce_ref[\"TITRE\"].encode('utf8')\n\t\t\t\tsiege_social = encode(annonce_ref[\"SIEGE_SOCIAL\"])\n\t\t\t\tif \"INTERNET\" in annonce_ref:\n\t\t\t\t\tsmtp = encode(annonce_ref[\"INTERNET\"][\"@smtp\"])\n\t\t\t\t\thttp = encode(annonce_ref[\"INTERNET\"][\"@http\"])\n\t\t\t\telse:\n\t\t\t\t\tsmtp = \"\"\n\t\t\t\t\thttp = \"\"\n\t\t\t\tdate_parution = encode(annonce_ref[\"PARUTION_JO_ASSOCIATION\"][\"@dateparution\"])\n\n\t\t\t\tregex_code = re.compile(r'[0-9]{1,10}')\n\t\t\t\tif \"THEMES\" in annonce_ref:\n\t\t\t\t\tif annonce_ref[\"THEMES\"] == None:\n\t\t\t\t\t\tthemes = \"\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tthemes = str(annonce_ref[\"THEMES\"])\n\t\t\t\tresults = re.findall(regex_code, themes)\n\t\t\t\tthemas = '|'.join(results).encode('utf8')\n\n\t\t\t\trow = [date_parution,\n\t\t\t\t\tdate_declaration,\n\t\t\t\t\ttype_annonce,\n\t\t\t\t\tlieu_declaration,\n\t\t\t\t\ttitre,\n\t\t\t\t\tobjet,\n\t\t\t\t\tsiege_social,\n\t\t\t\t\tthemas,\n\t\t\t\t\tsmtp,\n\t\t\t\t\thttp,\n\t\t\t\t\tnouvel_objet,\n\t\t\t\t\tadditif_objet,\n\t\t\t\t\tancien_titre,\n\t\t\t\t\tcp,\n\t\t\t\t\tdept]\n\n\t\t\t\toutput.writerow(row)","repo_name":"diegantobass/polearth","sub_path":"DILA_asso/data/olddatatocsv.py","file_name":"olddatatocsv.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37434442989","text":"# Variáveis =\n# números\nvelociade_internet = 10\nprint(velociade_internet)\n# valores boleanos\nesta_aberto = True\n# Strings\nnome_do_curso = \"Lógica de programação\"\n\n# Como variáveis seriam usadas em um programa real?\n# Problema 1: Escreva o valor que um funcionário ganha por hora com base no seu salário mensal, em horas trabalhadas por mês.\n\nsalario_mensal = input(\"Qual é o seu salário mensal?\")\nhoras_trabalhadas_mensalmente = input(\"Quantas horas você trabalha por mês?\")\nvalor_hora = int(salario_mensal) / int(horas_trabalhadas_mensalmente)\nprint (valor_hora)","repo_name":"JoseAugusto7/Aulas","sub_path":"Phyton/Logica1.py","file_name":"Logica1.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11666023682","text":"from selenium import webdriver\r\nfrom selenium.webdriver.chrome import service as fs\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nimport numpy as np\r\nimport datetime\r\nimport time\r\nimport json\r\n\r\n# crawler class 爬蟲套件\r\nclass crawler:\r\n # @staticmethod\r\n # 擷取今日電力資訊 1.日期 2.更新時間 3.目前用電量 4.使用率 5.預估最高用電 6.尖峰使用率 7.今日最大供電能力 
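# The XML-to-CSV script above relies on xmltodict mapping XML attributes to
# "@"-prefixed keys and child elements to plain keys. A minimal sketch with a
# made-up snippet shaped like the ANNONCE_REF documents:
import xmltodict

doc = xmltodict.parse('<ANNONCE_REF numannonce="42"><TITRE>Asso</TITRE></ANNONCE_REF>')
assert doc["ANNONCE_REF"]["@numannonce"] == "42"
assert doc["ANNONCE_REF"]["TITRE"] == "Asso"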
8.供電色號\r\n def electricityinfo_current(self, strDate=datetime.datetime.today().date()):\r\n # 今日日期\r\n dataTimeStampP1 = 'null'\r\n # 更新時間\r\n dataTimeStampP2 = 'null'\r\n # 目前用電量\r\n latest_load = 'null'\r\n # 使用率\r\n latest_load_perc = 'null'\r\n # 預估最高用電\r\n load_forecast_max = 'null'\r\n # 尖峰使用率\r\n load_forecast_max_perc = 'null'\r\n # 今日最大供電能力\r\n supply_arranged_max = 'null'\r\n # 供電狀況\r\n lightState = 'null'\r\n # 擷取次數\r\n num = 0\r\n try:\r\n print('--cralwer electricityinfo_current start--')\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n while num < 3:\r\n # print('try '+str(num+1))\r\n driver.get(\r\n \"https://www.taipower.com.tw/d006/loadGraph/loadGraph/load_briefing3.html\")\r\n driver.implicitly_wait(10)\r\n time.sleep(1)\r\n dataTimeStampP1 = driver.find_element(\r\n By.ID, 'dataTimeStampP1').text\r\n dataTimeStampP2 = driver.find_element(\r\n By.ID, 'dataTimeStampP2').text.replace(\"更新\", \"\")\r\n latest_load = driver.find_element(\r\n By.ID, 'latest_load').text.replace(\",\", \"\")\r\n latest_load_perc = driver.find_element(\r\n By.ID, 'latest_load_perc').text + \"%\"\r\n load_forecast_max = driver.find_element(\r\n By.ID, 'load_forecast_max').text.replace(\",\", \"\")\r\n load_forecast_max_perc = driver.find_element(\r\n By.ID, 'load_forecast_max_perc').text + \"%\"\r\n supply_arranged_max = driver.find_element(\r\n By.ID, 'supply_arranged_max').text.replace(\",\", \"\")\r\n #lighttext = driver.find_element(By.ID, 'lighttext').text[0:4]\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not latest_load == 'null' and not latest_load_perc == 'null' and not load_forecast_max == 'null' and not load_forecast_max_perc == 'null' and not supply_arranged_max == 'null':\r\n value = float(supply_arranged_max) - float(latest_load)\r\n percent = (value/float(supply_arranged_max)) * 100\r\n if percent > 10:\r\n lightState = '#00DD00'\r\n elif 10 > percent >= 6:\r\n lightState = '#FFFF00'\r\n elif percent < 6 and value > 90:\r\n lightState = '#FFA500'\r\n elif 90 > value >= 50:\r\n lightState = '#FF0000'\r\n elif value < 50:\r\n lightState = '#444444'\r\n break\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n print('--cralwer electricityinfo_current end--')\r\n data = {'dataTimeStampP1': dataTimeStampP1, 'dataTimeStampP2': dataTimeStampP2, 'latest_load': latest_load, 'latest_load_perc': latest_load_perc,\r\n 'load_forecast_max': load_forecast_max, 'load_forecast_max_perc': load_forecast_max_perc, 'supply_arranged_max': supply_arranged_max, 'lightState': lightState}\r\n return data\r\n\r\n # 擷取昨日電力供需資訊 1.昨日日期 2.昨日最高用電量 3.尖峰備轉容量率\r\n def electricityInfo_yday(self, strDate=datetime.datetime.today().date()):\r\n # 昨日日期\r\n ydaytime = 'null'\r\n # 昨日最高用電量\r\n load_max_yday = 'null'\r\n # 尖峰備轉容量率\r\n rsv_perc_yday = 'null'\r\n # 擷取次數\r\n num = 0\r\n try:\r\n print('--cralwer electricityInfo_yday start--')\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n while num < 3:\r\n # print('try ' + str(num + 1))\r\n driver.get(\r\n \"https://www.taipower.com.tw/d006/loadGraph/loadGraph/load_reserve_.html\")\r\n driver.implicitly_wait(10)\r\n time.sleep(1)\r\n ydaytime = driver.find_element(By.ID, 'ydaytime').text\r\n load_max_yday = 
driver.find_element(\r\n By.ID, 'load_max_yday').text.replace(\",\", \"\")\r\n rsv_perc_yday = driver.find_element(\r\n By.ID, 'rsv_perc_yday').text + \"%\"\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not ydaytime == 'null' and not load_max_yday == 'null' and not rsv_perc_yday == 'null':\r\n break\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n print('--cralwer electricityInfo_yday end--')\r\n data = {'ydaytime': ydaytime, 'load_max_yday': load_max_yday,\r\n 'rsv_perc_yday': rsv_perc_yday}\r\n return data\r\n\r\n # 擷取未來電力供需資訊(一周) 1.更新日期 2.日期 3.day 4.淨尖峰供電能力 5.尖峰負載 6.備轉容量 7.備轉容量率 8.備轉容量供電色號\r\n def electricityInfo_future(self, strDate=datetime.datetime.today().date()):\r\n data = []\r\n try:\r\n print('--cralwer electricityInfo_future start--')\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n for i in range(1, 8):\r\n # 更新日期\r\n datetime = 'null'\r\n # 日期\r\n dateStr = 'null'\r\n # day\r\n dayStr = 'null'\r\n # 淨尖峰供電能力\r\n supply = 'null'\r\n # 尖峰負載\r\n load = 'null'\r\n # 備轉容量\r\n value = 'null'\r\n # 備轉容量率\r\n percent = 'null'\r\n # 備轉容量燈號\r\n lightState = 'null'\r\n # 擷取次數\r\n num = 0\r\n while num < 3:\r\n # print('try ' + str(num + 1))\r\n driver.get(\r\n \"https://www.taipower.com.tw/d006/loadGraph/loadGraph/load_forecast_.html\")\r\n driver.implicitly_wait(10)\r\n # time.sleep(1)\r\n datetime = driver.find_element(By.ID, 'datetime').text\r\n dateStr = driver.find_element(\r\n By.ID, 'date'+str(i)).text[0:5]\r\n dayStr = driver.find_element(\r\n By.ID, 'date'+str(i)).text[7:10]\r\n supply = driver.find_element(By.ID, 'supply'+str(i)).text\r\n load = driver.find_element(By.ID, 'load'+str(i)).text\r\n value = int(driver.find_element(\r\n By.ID, 'value'+str(i)).text)\r\n percent = float(driver.find_element(\r\n By.XPATH, '//*[@id=\"percent'+str(i)+'\"]/td').text.strip('%'))\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not dateStr == 'null' and not dayStr == 'null' and not supply == 'null' and not load == 'null' and not value == 'null' and not percent == 'null':\r\n if percent > 10:\r\n lightState = '#00DD00'\r\n elif 10 > percent >= 6:\r\n lightState = '#FFFF00'\r\n elif percent < 6 and value > 90:\r\n lightState = '#FFA500'\r\n elif 90 > value >= 50:\r\n lightState = '#FF0000'\r\n elif value < 50:\r\n lightState = '#444444'\r\n data.append({'datetime': datetime, 'dateStr': dateStr, 'dayStr': dayStr, 'supply': supply,\r\n 'load': load, 'value': value, 'percent': str(percent) + \"%\", 'lightState': lightState})\r\n break\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if num == 3:\r\n data.append({'datetime': datetime, 'dateStr': dateStr, 'dayStr': dayStr, 'supply': supply,\r\n 'load': load, 'value': value, 'percent': percent, 'lightState': lightState})\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n print('--cralwer electricityInfo_future end--')\r\n return data\r\n\r\n # 擷取太陽能 1.更新日期 2.裝置容量 3.淨發電量 4.裝置容量淨發電量比\r\n def solar_info(self, strDate=datetime.datetime.today().date()):\r\n # 更新日期\r\n datetime = 'null'\r\n # 裝置容量\r\n capacity_stored = 'null'\r\n # 淨發電量\r\n electricity_stored = 'null'\r\n # 裝置容量/淨發電量比\r\n percent = 'null'\r\n # 擷取次數\r\n num = 0\r\n try:\r\n print('--cralwer solar_info start--')\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = 
webdriver.Chrome(service=chrome_service, options=options)\r\n while num < 3:\r\n # print('try ' + str(num + 1))\r\n driver.get(\r\n \"https://www.taipower.com.tw/d006/loadGraph/loadGraph/genshx_.html\")\r\n driver.implicitly_wait(10)\r\n # time.sleep(1)\r\n datetime = driver.find_element(By.ID, 'datetime').text\r\n capacity_stored = driver.find_element(\r\n By.XPATH, '//*[@id=\"unitgentab\"]/tbody/tr[185]/td[2]').text\r\n capacity_stored = float(\r\n capacity_stored[0:capacity_stored.find('(')])\r\n electricity_stored = driver.find_element(\r\n By.XPATH, '//*[@id=\"unitgentab\"]/tbody/tr[185]/td[3]').text[0:5]\r\n electricity_stored = float(\r\n electricity_stored[0:electricity_stored.find('(')])\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not datetime == 'null' and not capacity_stored == 'null' and not electricity_stored == 'null':\r\n if electricity_stored == 0.0:\r\n percent = round(capacity_stored/100, 2)\r\n else:\r\n percent = round(\r\n (electricity_stored/capacity_stored)*100, 2)\r\n break\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n data = {'datetime': datetime, 'capacity_stored': str(\r\n capacity_stored), 'electricity_stored': str(electricity_stored), 'percent': str(percent) + \"%\"}\r\n print('--cralwer solar_info end--')\r\n return data\r\n\r\n # 擷取電力交易平台平均結清價格 1.調頻備轉 2.即時備轉 3.補充備轉\r\n def electricity_deal(self, strDate=datetime.datetime.today().date()):\r\n # 調頻備轉\r\n FMTransferAvePrice = 'null'\r\n # 即時備轉\r\n realtimeTransferAvePrice = 'null'\r\n # 補充備轉\r\n fartherTransferAvePrice = 'null'\r\n # 擷取次數\r\n num = 0\r\n try:\r\n print('--cralwer electricity_deal start--')\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n while num < 3:\r\n # print('try ' + str(num + 1))\r\n driver.get(\"https://etp.taipower.com.tw/\")\r\n driver.implicitly_wait(10)\r\n # time.sleep(1)\r\n FMTransferAvePrice = driver.find_element(\r\n By.XPATH, '//table[@class=\"announce_board\" and position()=1]/tbody/tr[2]/td[2]').text\r\n realtimeTransferAvePrice = driver.find_element(\r\n By.XPATH, '//table[@class=\"announce_board\" and position()=1]/tbody/tr[3]/td[2]').text\r\n fartherTransferAvePrice = driver.find_element(\r\n By.XPATH, '//table[@class=\"announce_board\" and position()=1]/tbody/tr[4]/td[2]').text\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not FMTransferAvePrice == 'null' and not realtimeTransferAvePrice == 'null' and not fartherTransferAvePrice == 'null':\r\n break\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n data = {'FMTransferAvePrice': FMTransferAvePrice, 'realtimeTransferAvePrice':\r\n realtimeTransferAvePrice, 'fartherTransferAvePrice': fartherTransferAvePrice}\r\n print('--cralwer electricity_deal end--')\r\n return data\r\n\r\n # 擷取電力交易平台即時備轉 當天24小時1.小時 2.得標容量(國營) 3.得標容量(民營) 4.得標容量(非交易) 5.結清價格\r\n def electricity_deal_realtimeStored(self, eacHourValue, strDate=datetime.datetime.today().date()):\r\n print('--cralwer electricity_deal_realtimeStored start--')\r\n uml = r'https://etp.taipower.com.tw/'\r\n hourlyList = []\r\n futures = []\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n pool = ThreadPoolExecutor(max_workers=4)\r\n # 多工執行\r\n for i in range(0, 24, 6):\r\n future = 
pool.submit(eacHourValue, options, chrome_service, i, '6')\r\n futures.append(future)\r\n pool.shutdown()\r\n for fu in futures:\r\n hourlyList.extend(fu.result())\r\n print('--cralwer electricity_deal_realtimeStored end--')\r\n return hourlyList\r\n\r\n # 擷取電力交易平台補充備轉 當天24小時1.小時 2.得標容量(國營) 3.得標容量(民營) 4.得標容量(非交易) 5.結清價格\r\n def electricity_deal_replenishStore(self, eacHourValue, strDate=datetime.datetime.today().date()):\r\n print('--cralwer electricity_deal_replenishStore start--')\r\n hourlyList = []\r\n futures = []\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n pool = ThreadPoolExecutor(max_workers=4)\r\n # 多工執行\r\n for i in range(0, 24, 6):\r\n future = pool.submit(eacHourValue, options, chrome_service, i, '7')\r\n futures.append(future)\r\n pool.shutdown()\r\n for fu in futures:\r\n hourlyList.extend(fu.result())\r\n print('--cralwer electricity_deal_replenishStore end--')\r\n return hourlyList\r\n\r\n def eacHourValue(options, chrome_service, h, n):\r\n print('--cralwer eacHourValue start--')\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n driver.get('https://etp.taipower.com.tw/')\r\n driver.implicitly_wait(10)\r\n # time.sleep(1)\r\n hourlyList = []\r\n actions = ActionChains(driver)\r\n # print('start', h, 'h value get')\r\n for i in range(h, h + 6):\r\n StateOwnedStored = 'null'\r\n # 得標容量(民營)\r\n investorownedStored = 'null'\r\n # 得標容量(非交易)\r\n nodealStored = 'null'\r\n # 結清價格\r\n price = 'null'\r\n for j in range(0, 2):\r\n actions.move_to_element(driver.find_element(\r\n By.XPATH, '(//*[@class=\"recharts-layer recharts-bar-rectangles\"])[' + n + ']/*/*[' + str(i + 1) + ']'))\r\n actions.perform()\r\n time.sleep(0.2)\r\n StateOwnedStored = driver.find_elements(\r\n By.CLASS_NAME, \"recharts-tooltip-item-value\")[0].text\r\n investorownedStored = driver.find_elements(\r\n By.CLASS_NAME, \"recharts-tooltip-item-value\")[1].text\r\n nodealStored = driver.find_elements(\r\n By.CLASS_NAME, \"recharts-tooltip-item-value\")[2].text\r\n price = driver.find_elements(\r\n By.CLASS_NAME, \"recharts-tooltip-item-value\")[3].text\r\n hourly = {'hour': str(i), 'StateOwnedStored': str(StateOwnedStored), 'investorownedStored': str(\r\n investorownedStored), 'nodealStored': str(nodealStored), 'price': str(price)}\r\n hourlyList.append(hourly)\r\n # print('end', h, 'h value get')\r\n print('--cralwer eacHourValue end--')\r\n return hourlyList\r\n\r\n # 擷取交通部氣象局彰化縣鹿港鎮 1.地區 2.明天日期 3.時段 4.溫度 5.降雨機率\r\n def cwb_LugangInfo(self, strDate=datetime.datetime.today().date()):\r\n print('--cralwer cwb_LugangInfo start--')\r\n cwbinfoList = []\r\n PC3_D = '1' if datetime.datetime.now().hour >= 22 else '2'\r\n IDList = [[PC3_D, '00', '00', '03'], [PC3_D, '03', '00', '03'], [PC3_D, '06', '06', '09'], [PC3_D, '09', '06', '09'], [\r\n PC3_D, '12', '12', '15'], [PC3_D, '15', '12', '15'], [PC3_D, '18', '18', '21'], [PC3_D, '21', '18', '21']]\r\n\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n for i in range(len(IDList)):\r\n # 明天日期\r\n date = 'null'\r\n # 時段\r\n period = 'null'\r\n # 溫度\r\n temperature = 'null'\r\n # 降雨機率\r\n PofP = 'null'\r\n # 擷取次數\r\n num = 0\r\n cwbinfo = {'district': '彰化縣鹿港鎮', 'date': date,\r\n 'period': period, 
'temperature': temperature, 'PofP': PofP}\r\n try:\r\n while num < 3:\r\n # print('try ' + str(num + 1))\r\n driver.get(\r\n \"https://www.cwb.gov.tw/V8/C/W/Town/Town.html?TID=1000702\")\r\n driver.implicitly_wait(10)\r\n # time.sleep(1)\r\n date = driver.find_element(\r\n By.ID, 'PC3_D' + IDList[i][0] + '').text.replace(\"\\n\", \" \")\r\n period = driver.find_element(\r\n By.XPATH, '//*[@id=\"PC3_D' + IDList[i][0] + 'H' + IDList[i][1] + '\"]/span').text\r\n temperature = driver.find_element(\r\n By.XPATH, '//*[@headers=\"PC3_T PC3_D' + IDList[i][0] + ' PC3_D' + IDList[i][0] + 'H' + IDList[i][1] + '\"]/span[1]').text\r\n PofP = driver.find_element(By.XPATH, '//*[@headers=\"PC3_Po PC3_D' + IDList[i][0] + ' PC3_D' +\r\n IDList[i][0] + 'H' + IDList[i][2] + ' PC3_D' + IDList[i][0] + 'H' + IDList[i][3] + '\"]').text\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not date == 'null' and not period == 'null' and not temperature == 'null' and not PofP == 'null':\r\n cwbinfo = {'district': '彰化縣鹿港鎮', 'date': date,\r\n 'period': period, 'temperature': temperature, 'PofP': PofP}\r\n break\r\n cwbinfoList.append(cwbinfo)\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n\r\n data = cwbinfoList\r\n print('--cralwer cwb_LugangInfo end--')\r\n return data\r\n\r\n # 擷取交通部氣象局雲林縣崙背鄉 1.地區 2.明天日期 3.時段 4.溫度 5.降雨機率\r\n def cwb_LunbeiInfo(self, strDate=datetime.datetime.today().date()):\r\n print('--cralwer cwb_LunbeiInfo start--')\r\n cwbinfoList = []\r\n PC3_D = '1' if datetime.datetime.now().hour >= 22 else '2'\r\n IDList = [[PC3_D, '00', '00', '03'], [PC3_D, '03', '00', '03'], [PC3_D, '06', '06', '09'], [PC3_D, '09', '06', '09'], [\r\n PC3_D, '12', '12', '15'], [PC3_D, '15', '12', '15'], [PC3_D, '18', '18', '21'], [PC3_D, '21', '18', '21']]\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n for i in range(len(IDList)):\r\n # 明天日期\r\n date = 'null'\r\n # 時段\r\n period = 'null'\r\n # 溫度\r\n temperature = 'null'\r\n # 降雨機率\r\n PofP = 'null'\r\n # 擷取次數\r\n num = 0\r\n cwbinfo = {'district': '雲林縣崙背鄉', 'date': date,\r\n 'period': period, 'temperature': temperature, 'PofP': PofP}\r\n try:\r\n while num < 3:\r\n # print('try ' + str(num + 1))\r\n driver.get(\r\n \"https://www.cwb.gov.tw/V8/C/W/Town/Town.html?TID=1000912\")\r\n driver.implicitly_wait(10)\r\n # time.sleep(1)\r\n date = driver.find_element(\r\n By.ID, 'PC3_D' + IDList[i][0] + '').text.replace(\"\\n\", \" \")\r\n period = driver.find_element(\r\n By.XPATH, '//*[@id=\"PC3_D' + IDList[i][0] + 'H' + IDList[i][1] + '\"]/span').text\r\n temperature = driver.find_element(\r\n By.XPATH, '//*[@headers=\"PC3_T PC3_D' + IDList[i][0] + ' PC3_D' + IDList[i][0] + 'H' + IDList[i][1] + '\"]/span[1]').text\r\n PofP = driver.find_element(By.XPATH, '//*[@headers=\"PC3_Po PC3_D' + IDList[i][0] + ' PC3_D' +\r\n IDList[i][0] + 'H' + IDList[i][2] + ' PC3_D' + IDList[i][0] + 'H' + IDList[i][3] + '\"]').text\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not date == 'null' and not period == 'null' and not temperature == 'null' and not PofP == 'null':\r\n cwbinfo = {'district': '雲林縣崙背鄉', 'date': date,\r\n 'period': period, 'temperature': temperature, 'PofP': PofP}\r\n break\r\n cwbinfoList.append(cwbinfo)\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n\r\n data = cwbinfoList\r\n print('--cralwer cwb_LunbeiInfo end--')\r\n return data\r\n\r\n # 擷取交通部氣象局嘉義縣布袋鎮 1.地區 
2.明天日期 3.時段 4.溫度 5.降雨機率\r\n def cwb_BudaiInfo(self, strDate=datetime.datetime.today().date()):\r\n print('--cralwer cwb_BudaiInfo start--')\r\n cwbinfoList = []\r\n PC3_D = '1' if datetime.datetime.now().hour >= 22 else '2'\r\n IDList = [[PC3_D, '00', '00', '03'], [PC3_D, '03', '00', '03'], [PC3_D, '06', '06', '09'], [PC3_D, '09', '06', '09'], [\r\n PC3_D, '12', '12', '15'], [PC3_D, '15', '12', '15'], [PC3_D, '18', '18', '21'], [PC3_D, '21', '18', '21']]\r\n\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n for i in range(len(IDList)):\r\n # 明天日期\r\n date = 'null'\r\n # 時段\r\n period = 'null'\r\n # 溫度\r\n temperature = 'null'\r\n # 降雨機率\r\n PofP = 'null'\r\n # 擷取次數\r\n num = 0\r\n cwbinfo = {'district': '嘉義縣布袋鎮', 'date': date,\r\n 'period': period, 'temperature': temperature, 'PofP': PofP}\r\n try:\r\n while num < 3:\r\n # print('try ' + str(num + 1))\r\n driver.get(\r\n \"https://www.cwb.gov.tw/V8/C/W/Town/Town.html?TID=1001003\")\r\n driver.implicitly_wait(10)\r\n # time.sleep(1)\r\n date = driver.find_element(\r\n By.ID, 'PC3_D' + IDList[i][0] + '').text.replace(\"\\n\", \" \")\r\n period = driver.find_element(\r\n By.XPATH, '//*[@id=\"PC3_D' + IDList[i][0] + 'H' + IDList[i][1] + '\"]/span').text\r\n temperature = driver.find_element(\r\n By.XPATH, '//*[@headers=\"PC3_T PC3_D' + IDList[i][0] + ' PC3_D' + IDList[i][0] + 'H' + IDList[i][1] + '\"]/span[1]').text\r\n PofP = driver.find_element(By.XPATH, '//*[@headers=\"PC3_Po PC3_D' + IDList[i][0] + ' PC3_D' +\r\n IDList[i][0] + 'H' + IDList[i][2] + ' PC3_D' + IDList[i][0] + 'H' + IDList[i][3] + '\"]').text\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not date == 'null' and not period == 'null' and not temperature == 'null' and not PofP == 'null':\r\n cwbinfo = {'district': '嘉義縣布袋鎮', 'date': date,\r\n 'period': period, 'temperature': temperature, 'PofP': PofP}\r\n break\r\n cwbinfoList.append(cwbinfo)\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n\r\n data = cwbinfoList\r\n print('--cralwer cwb_BudaiInfo end--')\r\n return data\r\n\r\n # 擷取交通部氣象局臺南市七股區 1.地區 2.明天日期 3.時段 4.溫度 5.降雨機率\r\n def cwb_QiguInfo(self, strDate=datetime.datetime.today().date()):\r\n print('--cralwer cwb_QiguInfo start--')\r\n cwbinfoList = []\r\n PC3_D = '1' if datetime.datetime.now().hour >= 22 else '2'\r\n IDList = [[PC3_D, '00', '00', '03'], [PC3_D, '03', '00', '03'], [PC3_D, '06', '06', '09'], [PC3_D, '09', '06', '09'], [\r\n PC3_D, '12', '12', '15'], [PC3_D, '15', '12', '15'], [PC3_D, '18', '18', '21'], [PC3_D, '21', '18', '21']]\r\n\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(service=chrome_service, options=options)\r\n for i in range(len(IDList)):\r\n # 明天日期\r\n date = 'null'\r\n # 時段\r\n period = 'null'\r\n # 溫度\r\n temperature = 'null'\r\n # 降雨機率\r\n PofP = 'null'\r\n # 擷取次數\r\n num = 0\r\n cwbinfo = {'district': '臺南市七股區', 'date': date,\r\n 'period': period, 'temperature': temperature, 'PofP': PofP}\r\n try:\r\n while num < 3:\r\n # print('try ' + str(num + 1))\r\n driver.get(\r\n \"https://www.cwb.gov.tw/V8/C/W/Town/Town.html?TID=6701500\")\r\n driver.implicitly_wait(10)\r\n # time.sleep(1)\r\n date = driver.find_element(\r\n By.ID, 'PC3_D' + IDList[i][0] + '').text.replace(\"\\n\", \" \")\r\n period = driver.find_element(\r\n 
By.XPATH, '//*[@id=\"PC3_D' + IDList[i][0] + 'H' + IDList[i][1] + '\"]/span').text\r\n temperature = driver.find_element(\r\n By.XPATH, '//*[@headers=\"PC3_T PC3_D' + IDList[i][0] + ' PC3_D' + IDList[i][0] + 'H' + IDList[i][1] + '\"]/span[1]').text\r\n PofP = driver.find_element(By.XPATH, '//*[@headers=\"PC3_Po PC3_D' + IDList[i][0] + ' PC3_D' +\r\n IDList[i][0] + 'H' + IDList[i][2] + ' PC3_D' + IDList[i][0] + 'H' + IDList[i][3] + '\"]').text\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not date == 'null' and not period == 'null' and not temperature == 'null' and not PofP == 'null':\r\n cwbinfo = {'district': '臺南市七股區', 'date': date,\r\n 'period': period, 'temperature': temperature, 'PofP': PofP}\r\n break\r\n cwbinfoList.append(cwbinfo)\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n\r\n data = cwbinfoList\r\n print('--cralwer cwb_QiguInfo end--')\r\n return data\r\n\r\n # 擷取今日電力供需資訊函數 1.備轉容量率\r\n def electricity_today(self, strDate=datetime.datetime.today().date()):\r\n # 備轉容量率\r\n reserve = 'null'\r\n # 擷取次數\r\n num = 0\r\n try:\r\n print('--cralwer electricity_today start--')\r\n while num < 3:\r\n # print('try ' + str(num + 1))\r\n options = Options()\r\n options.add_argument('--headless')\r\n chrome_service = fs.Service(\r\n executable_path=ChromeDriverManager().install())\r\n driver = webdriver.Chrome(\r\n service=chrome_service, options=options)\r\n driver.get(\r\n \"https://www.taipower.com.tw/d006/loadGraph/loadGraph/load_reserve_.html\")\r\n driver.implicitly_wait(10)\r\n # time.sleep(1)\r\n reserve = driver.find_element(\r\n By.XPATH, '//*[@id=\"reserve\"]/span').text\r\n num += 1\r\n # 確認是否擷取到數據 沒有最多retry三次\r\n if not reserve == 'null':\r\n break\r\n # 網頁擷取錯誤例外處理\r\n except:\r\n print('error occured')\r\n pass\r\n\r\n date = {'reserve': reserve}\r\n print('--cralwer electricity_today end--')\r\n return date\r\n","repo_name":"jony371400/Project-DashboardApp","sub_path":"ePower/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":32095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75136331145","text":"class Solution:\n def jobScheduling(self, startTime, endTime, profit) -> int:\n n = len(startTime)\n max_profit = [0]*n\n prev_best = 0\n events = list()\n START, END = 1, 0\n # do using line sweep\n # sort the events like start and end\n for idx, (start, end) in enumerate(zip(startTime, endTime)):\n events.append((start, START, idx))\n events.append((end, END, idx))\n events.sort()\n # if it is start event take the sum of profit and prev_best else just update the prev_best\n for x, e, idx in events:\n if e == START:\n max_profit[idx] = max(max_profit[idx], profit[idx]+prev_best)\n else:\n prev_best = max(prev_best, max_profit[idx])\n\n return max(max_profit)\n","repo_name":"bamblebam/competitive-programming","sub_path":"2021/8-August-21/28-8-21/maxprofitinjobscheduling.py","file_name":"maxprofitinjobscheduling.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12756859205","text":"import socket\nimport logging\nimport urllib.parse\nimport urllib.request\nfrom urllib.error import URLError\n\n# timeout in seconds\ntimeout = 6\nsocket.setdefaulttimeout(timeout)\nlogger = logging.getLogger(__name__)\n\n\ndef pull_price(symbol, startd=None, endd=None, period='d'):\n values = {}\n values['s'] = symbol\n if startd:\n values['a'] = startd.month - 1\n values['b'] = startd.day\n 
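# A quick check of the line-sweep scheduler above: START events fold the best
# profit of every job that has already ended into the current job, and END
# events publish that job's total. On the classic sample, jobs 1 and 4 combine
# for the optimum:
s = Solution()
assert s.jobScheduling([1, 2, 3, 3], [3, 4, 5, 6], [50, 10, 40, 70]) == 120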
values['c'] = startd.year\n if endd:\n values['d'] = endd.month - 1\n values['e'] = endd.day\n values['f'] = endd.year\n values['g'] = period\n values['ignore'] = '.csv'\n\n url = r'http://ichart.finance.yahoo.com/table.csv'\n data = urllib.parse.urlencode(values)\n # usr_agent='Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)'\n # header={'User-Agent':usr_agent}\n # req = urllib.request.Request(url+'?'+data, headers=header)\n req = urllib.request.Request(url + '?' + data)\n # req = urllib.request.Request('http://ichart.finance.yahoo.com/table.csv?g=m&s=600000.SS')\n try:\n response = urllib.request.urlopen(req)\n content = response.read().decode()\n response.close()\n return content\n except (URLError, ConnectionResetError, socket.timeout) as e:\n logger.debug(str(e.__class__) + str(e))\n logger.debug(req.full_url)\n raise e","repo_name":"kongscn/corpdb","sub_path":"utils/pullprice.py","file_name":"pullprice.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43778818749","text":"\nfrom urllib import request\nfrom AppCoder.forms import ContactoFormulario, GaleriaFormulario, TallerFormulario\nfrom AppCoder.models import Contacto, Galeria, Taller\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\n# Create your views here.\n\n\n\ndef inicio(request):\n return render(request, \"AppCoder/index.html\")\n\n\ndef creacion_Galeria(request):\n\n if request.method == \"POST\":\n formulario = GaleriaFormulario(request.POST)\n if formulario.is_valid():\n data = formulario.cleaned_data\n galeria = Galeria(Nombre=data[\"Nombre\"], Descripcion=data[\"Descripcion\"], Imagen=data[\"Imagen\"] )\n galeria.save()\n formulario = GaleriaFormulario()\n return render(request, \"AppCoder/Galeria.html\", {\"formulario\": formulario})\n\n\n\ndef creacion_Taller(request):\n\n if request.method == \"POST\":\n formulario = TallerFormulario(request.POST)\n if formulario.is_valid():\n data = formulario.cleaned_data\n taller = Taller(Nombre=data[\"Nombre\"], Apellido=data[\"Apellido\"], Email=data[\"Email\"])\n taller.save()\n formulario = TallerFormulario()\n return render(request, \"AppCoder/Taller.html\", {\"formulario\": formulario})\n\n\n\n\n\ndef creacion_Contacto(request):\n\n if request.method == \"POST\":\n formulario = ContactoFormulario(request.POST)\n if formulario.is_valid():\n data = formulario.cleaned_data\n contacto = Contacto(Nombre=data[\"Nombre\"], Email=data[\"Email\"], Mensaje=data[\"Mensaje\"])\n contacto.save()\n formulario = ContactoFormulario()\n return render(request, \"AppCoder/Contacto.html\", {\"formulario\": formulario})\n\n\n \n\ndef buscar_alumnos(request):\n if request.GET:\n estudiantes = Taller.objects.filter(Nombre__icontains=request.GET[\"nombre_alumno\"])\n return render(request, \"AppCoder/busquedaAlumnos.html\", {\"listado_alumnos\": estudiantes}) \n return render(request, \"AppCoder/busquedaAlumnos.html\", {\"listado_alumnos\": []}) \n ","repo_name":"sandraauditore/EntregaAuditore","sub_path":"Entrega1Auditore/AppCoder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73802156744","text":"from loader import bot, dp, html, Data, db\nfrom aiogram.types import Message, CallbackQuery\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import CommandStart \nfrom src.others 
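# A minimal sketch of calling pull_price above. The ichart.finance.yahoo.com
# endpoint it targets was retired years ago, so treat this as documentation of
# the old contract rather than working code:
import datetime

csv_text = pull_price("600000.SS",
                      startd=datetime.date(2015, 1, 1),
                      endd=datetime.date(2015, 12, 31),
                      period="m")  # 'd'/'w'/'m' granularity via the 'g' param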
import messages as mes\nfrom src.keyboarbs.reply import main_menu\nfrom src.keyboarbs.inline import timezone_menu, func_menu, back_but\nfrom datetime import datetime, time\n\n\n@dp.message_handler(CommandStart(), state=\"*\")\nasync def start_handler(msg: Message, state: FSMContext):\n await state.finish()\n await msg.answer(text=mes.start_message, reply_markup=main_menu)\n \n@dp.message_handler(content_types=['text'])\nasync def menu_handler(msg: Message, state: FSMContext):\n await state.finish()\n if msg.text == 'Меню':\n await msg.answer(\"Укажите ваш часовой пояс.\", reply_markup=timezone_menu)\n\n@dp.callback_query_handler(text='back', state=Data.date) \n@dp.callback_query_handler(text_contains='zone_')\nasync def timezone_handler(call: CallbackQuery, state: FSMContext):\n if call.data != 'back':\n await state.update_data(gmt= int(call.data.split('_')[1]))\n await call.message.edit_text(\"Выберите.\", reply_markup=func_menu)\n await Data.Q1.set()\n\n@dp.callback_query_handler(text='back', state=Data.time) \n@dp.callback_query_handler(text_contains='func_', state=Data.Q1)\nasync def func_handler(call: CallbackQuery, state: FSMContext):\n if call.data != 'back':\n await state.update_data(func= call.data.split('_')[1])\n await call.message.edit_text(\"Укажите дату (дд.мм.гггг)\", reply_markup=back_but)\n await Data.date.set()\n\n@dp.callback_query_handler(text='back', state=Data.remind) \nasync def back_date(call: CallbackQuery, state: FSMContext):\n data= await state.get_data()\n if data.get('func') == 'alarm':\n view= 'будильника'\n if data.get('func') == 'notify':\n view= 'напоминания'\n await call.message.answer(f\"Укажите время {view} в 24-ом формате (чч:мм)\", reply_markup=back_but)\n await Data.time.set()\n\n@dp.message_handler(content_types=['text'], state=Data.date)\nasync def date_handler(msg: Message, state: FSMContext):\n try:\n datetime.strptime(msg.text, '%d.%m.%Y')\n await state.update_data(date= msg.text)\n data= await state.get_data()\n if data.get('func') == 'alarm':\n view= 'будильника'\n if data.get('func') == 'notify':\n view= 'напоминания'\n await msg.answer(f\"Укажите время {view} в 24-ом формате (чч:мм)\", reply_markup=back_but)\n await Data.time.set()\n except Exception as ex:\n print(ex)\n await msg.answer(text=mes.error_input)\n await Data.date.set()\n \n \n@dp.message_handler(content_types=['text'], state=Data.time)\nasync def time_handler(msg: Message, state: FSMContext):\n data= await state.get_data()\n dt= f\"{data.get('date').strip()} {msg.text.strip()}\"\n try:\n new_dt= datetime.strptime(dt, '%d.%m.%Y %H:%M')\n await state.update_data(date=new_dt) \n if data.get('func') == 'alarm':\n gmt=data.get('gmt')\n db.add(\n user_id=msg.chat.id,\n dt=new_dt,\n gmt=gmt\n )\n await msg.answer(\"Пожалуйста, не отключайте уведомления от Бота\", parse_mode=html)\n await state.finish()\n if data.get('func') == 'notify':\n await msg.answer(\"Введите текст напоминания.\", reply_markup=back_but)\n await Data.remind.set()\n except Exception as ex:\n print(ex)\n await msg.answer(text=mes.error_input)\n await Data.time.set()\n \n@dp.message_handler(content_types=['text'], state=Data.remind)\nasync def remind_handler(msg: Message, state: FSMContext):\n data= await state.get_data()\n gmt=data.get('gmt')\n new_dt=data.get('date')\n db.add(\n user_id=msg.chat.id,\n dt=new_dt,\n gmt=gmt,\n text=msg.text\n )\n await msg.answer(\"Пожалуйста, не отключайте уведомления от Бота\", parse_mode=html)\n await 
state.finish()","repo_name":"jadecis/notify_bot","sub_path":"src/handlers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11416515504","text":"import pickle\nimport uuid\n\nimport click\n\n# utilities\nfrom bench_search import bench_search\n\n\n@click.command()\n@click.option('--version', default=0, help='Code version, 0=sklearn, 1=dasksearchcv,'\n ' 2=async, 3=async-with-cacheplugin')\n@click.option('--lparam', default=4, help='number of parameters used in grid')\n@click.option('--rstate', default=0, help='random state for SGD classifier')\n@click.option('--refit', default=1, help='do refit')\n@click.option('--occupancy', default=2, help='occupancy factor')\n@click.option('--outfile', default=None, help='output file name')\ndef main(version, lparam, rstate, refit, occupancy, outfile):\n results = bench_search(version, lparam, rstate, refit, occupancy)\n results['params'] = {\n 'version': version,\n 'lparam': lparam,\n 'rstate': rstate,\n 'refit': refit,\n 'occupancy': occupancy\n }\n # save the results to pickle\n if outfile is None:\n outfile = uuid.uuid4()\n\n with open('/data/{}.pkl'.format(outfile), 'wb') as f:\n pickle.dump(results, f)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"thomasgreg/dcv-benchmarks","sub_path":"bench_async_search.py","file_name":"bench_async_search.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17486871726","text":"import cherrypy\nimport pymongo\nimport urllib2\nfrom simplegeo import Client\nfrom pymongo import Connection\nfrom Cheetah.Template import Template\n\n\nconnection = Connection()\ndb = connection['meetme']\nclient = Client('p2R3QHMxH3xZV6SAgeTdb6sqrxG6Qk8f','XbF4sZxLyhNDMzjykHmVysbzYtnbEtCn')\n\n\nclass Index(object):\n def group(self,name=\"\",error=\"\"):\n\n group_collection = db.Groups\n group = group_collection.find_one({\"name\":name})\n\n grouppage_t = open('grouppage.tmpl', 'r')\n self.grouppage_template = grouppage_t.read()\n\n g_exist = self.group_exist(name)\n if g_exist > 0:\n max_num = group[\"max\"]\n cur_num = group[\"joined\"]\n namespace = {\"cur_num\":cur_num,\"max_num\":max_num}\n namespace[\"error\"] = error\n namespace[\"name\"] = name\n\n if g_exist == 1:\n\n #group does exist (GOOD)\n\n namespace[\"full\"] = 0\n namespace[\"map\"] = None\n# return \"Group exists\"\n return str(Template(self.grouppage_template, namespace))\n elif g_exist == 2:\n namespace[\"full\"] = 1\n #group full!\n dicti = None\n if not group[\"finalized\"]:\n \n dicti = self.set_final(group)\n else:\n dicti = group\n namespace[\"map\"] = dicti[\"ll\"]\n\n# return req\n return str(Template(self.grouppage_template, namespace))\n elif g_exist == 0:\n #error page\n return \"No group \\\"\" + name + \"\\\" exists\"\n #group does not exist (BAD)\n elif g_exist == -1:\n #error page\n return \"I need a group name\"\n #empty name\n else:\n\n return \"Error!\"\n #what the fuck\n #error page\n\n group.exposed = True\n\n def set_final(self,group = None):\n dict_ll = self.get_lat_lng(group)\n lat = dict_ll[\"lat\"]\n lng = dict_ll[\"lng\"]\n name = group[\"name\"]\n joined = group[\"joined\"]\n maxe = group[\"max\"]\n pos = group[\"pos\"]\n \n\n req = client.places.search(lat,lng)[0].to_dict()\n coords = req[\"geometry\"][\"coordinates\"]\n lat, lng = coords[0] , coords[1]\n prop = req[\"properties\"]\n new_name = 
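# The bot handlers above validate free-form input by letting strptime raise
# and re-prompting. A stripped-down version of that pattern:
from datetime import datetime

def parse_dt(date_text, time_text):
    try:
        return datetime.strptime(f"{date_text.strip()} {time_text.strip()}",
                                 "%d.%m.%Y %H:%M")
    except ValueError:
        return None  # caller re-prompts, as the handlers' except branches do

assert parse_dt("01.02.2024", "09:30") == datetime(2024, 2, 1, 9, 30)
assert parse_dt("2024-02-01", "09:30") is None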
prop[\"name\"]\n        addr, city, state = prop[\"address\"], prop[\"city\"], prop[\"province\"]\n\n        group_collection = db.Groups\n        group_collection.update({\"name\":name},{\"name\":name,\"joined\":joined, \"max\":maxe, \"pos\":pos,\"ll\":{\"lat\":str(lat),\"lng\":str(lng),\"name\":new_name},\"addr\":addr, \"city\": city, \"state\":state, \"finalized\":1},upsert=True,safe=True)\n        return {\"ll\":{\"lat\":lat,\"lng\":lng,\"name\":new_name},\"addr\":addr, \"city\": city, \"state\":state}\n\n\n    def index(self):\n        homepage_t = open('homepage.tmpl', 'r')\n        self.homepage_template = homepage_t.read()\n        return str(Template(self.homepage_template))\n    index.exposed = True\n\n    def add(self,name=\"\",lat=\"\",lng=\"\"):\n        group_collection = db.Groups\n#        return str(self.group_exist(name))\n        try:\n            group_collection.update({\"name\":name},{\"$push\":{\"pos\":{\"lat\":str(lat),\"lng\":str(lng)}},\"$inc\":{\"joined\":1}},safe=True)\n        except pymongo.errors.OperationFailure:\n            return str(False)\n        except:\n            return str(False)\n        \n        group_t = open('group.tmpl', 'r')\n        self.group_template = group_t.read()\n        return str(Template(self.group_template, {\"name\":name})) \n    add.exposed = True\n\n    def get_lat_lng(self,group = None):\n        tups, num = group[\"pos\"], int(group[\"max\"])\n\n        lngs, lats = [float(x[\"lng\"]) for x in tups], [float(x[\"lat\"]) for x in tups]\n        avg_lng, avg_lat = reduce(lambda x,y:x+y,lngs)/num, reduce(lambda x,y:x+y,lats)/num\n        return {\"lat\":avg_lat,\"lng\":avg_lng}\n\n    def group_exist(self,name=\"\"):\n        if not (name==\"\"):\n            group_collection = db.Groups\n            group = group_collection.find_one({\"name\":name})\n            if group:\n                if group[\"joined\"] != group[\"max\"]:\n                    #not full\n                    return 1\n                else:\n                    #full\n                    return 2\n            else:\n                return 0\n        else:\n            return -1\n\n    def new(self,name=\"\",number=\"\",lat=\"\",lng=\"\"):\n        g_exist = self.group_exist(name)\n        if g_exist == 0:\n            #group does not exist (good)\n            group_collection = db.Groups\n            try:\n                group_collection.insert({\"name\":name,\"joined\":1,\"max\":int(number),\"pos\":[{\"lat\":lat,\"lng\":lng}],\"finalized\":0},safe=True)\n            except pymongo.errors.OperationFailure:\n                #error\n                return \"insert failed\"\n            except:\n                #error\n                return \"something is really bad\"\n            \n            group_t = open('group.tmpl', 'r')\n            self.group_template = group_t.read()\n            return str(Template(self.group_template, {\"name\":name}))\n        elif g_exist > 0:\n            #error page\n            #group does exist (bad)\n            #redirect to group page, tell error\n            return \"group already exists\"\n        elif g_exist == -1:\n            #empty name\n            return \"Group is empty\"\n        else:\n            #unexpected return code\n            return \"Error!\"\n    new.exposed = True\n\ncherrypy.quickstart(Index())\n","repo_name":"mosesn/MeatMe","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32171250949","text":"\"\"\"Beautiful Soup\nElixir and Tonic\n\"The Screen-Scraper's Friend\"\nhttp://www.crummy.com/software/BeautifulSoup/\n\nBeautiful Soup uses a pluggable XML or HTML parser to parse a\n(possibly invalid) document into a tree representation. Beautiful Soup\nprovides methods and Pythonic idioms that make it easy to\nnavigate, search, and modify the parse tree.\n\nBeautiful Soup works with Python 2.6 and up. 
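# get_lat_lng() above averages the members' stored coordinates; the same
# arithmetic without reduce(), on a made-up two-member group:
pos = [{"lat": "40.0", "lng": "-74.0"}, {"lat": "42.0", "lng": "-76.0"}]
avg_lat = sum(float(p["lat"]) for p in pos) / len(pos)
avg_lng = sum(float(p["lng"]) for p in pos) / len(pos)
assert (avg_lat, avg_lng) == (41.0, -75.0)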
It works better if lxml\nand/or html5lib is installed.\n\nFor more than you ever wanted to know about Beautiful Soup, see the\ndocumentation:\nhttp://www.crummy.com/software/BeautifulSoup/bs4/doc/\n\"\"\"\n\n__author__ = \"Leonard Richardson (leonardr@segfault.org)\"\n__version__ = \"4.4.0\"\n__copyright__ = \"Copyright (c) 2004-2015 Leonard Richardson\"\n__license__ = \"MIT\"\n\n__all__ = ['BeautifulSoup']\n\nimport sys\nimport os\nimport re\nimport warnings\n\ndef _remove_xml_header(data):\n return re.sub(r'<\\s*\\?xml\\s*[^\\?>]*\\?*>\\s*','',data, flags=re.I)\n\nfrom .builder import builder_registry, ParserRejectedMarkup\nfrom .dammit import UnicodeDammit\nfrom .element import (\n CData,\n Comment,\n DEFAULT_OUTPUT_ENCODING,\n Declaration,\n Doctype,\n NavigableString,\n PageElement,\n ProcessingInstruction,\n ResultSet,\n SoupStrainer,\n Tag,\n )\n\n# The very first thing we do is give a useful error if someone is\n# running this code under Python 3 without converting it.\n'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'!='You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'\n\nclass BeautifulSoup(Tag):\n \"\"\"\n This class defines the basic interface called by the tree builders.\n\n These methods will be called by the parser:\n reset()\n feed(markup)\n\n The tree builder may call these methods from its feed() implementation:\n handle_starttag(name, attrs) # See note about return value\n handle_endtag(name)\n handle_data(data) # Appends to the current data node\n endData(containerClass=NavigableString) # Ends the current data node\n\n No matter how complicated the underlying parser is, you should be\n able to build a tree using 'start tag' events, 'end tag' events,\n 'data' events, and \"done with data\" events.\n\n If you encounter an empty-element tag (aka a self-closing tag,\n like HTML's
    tag), call handle_starttag and then\n handle_endtag.\n \"\"\"\n ROOT_TAG_NAME = '[document]'\n\n # If the end-user gives no indication which tree builder they\n # want, look for one with these features.\n DEFAULT_BUILDER_FEATURES = ['html', 'fast']\n\n ASCII_SPACES = '\\x20\\x0a\\x09\\x0c\\x0d'\n\n NO_PARSER_SPECIFIED_WARNING = \"No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\\\"%(parser)s\\\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\\n\\nTo get rid of this warning, change this:\\n\\n BeautifulSoup([your markup])\\n\\nto this:\\n\\n BeautifulSoup([your markup], \\\"%(parser)s\\\")\\n\"\n\n def __init__(self, markup=\"\", features=None, builder=None,\n parse_only=None, from_encoding=None, exclude_encodings=None,\n **kwargs):\n \"\"\"The Soup object is initialized as the 'root tag', and the\n provided markup (which can be a string or a file-like object)\n is fed into the underlying parser.\"\"\"\n\n if 'convertEntities' in kwargs:\n warnings.warn(\n \"BS4 does not respect the convertEntities argument to the \"\n \"BeautifulSoup constructor. Entities are always converted \"\n \"to Unicode characters.\")\n\n if 'markupMassage' in kwargs:\n del kwargs['markupMassage']\n warnings.warn(\n \"BS4 does not respect the markupMassage argument to the \"\n \"BeautifulSoup constructor. The tree builder is responsible \"\n \"for any necessary markup massage.\")\n\n if 'smartQuotesTo' in kwargs:\n del kwargs['smartQuotesTo']\n warnings.warn(\n \"BS4 does not respect the smartQuotesTo argument to the \"\n \"BeautifulSoup constructor. Smart quotes are always converted \"\n \"to Unicode characters.\")\n\n if 'selfClosingTags' in kwargs:\n del kwargs['selfClosingTags']\n warnings.warn(\n \"BS4 does not respect the selfClosingTags argument to the \"\n \"BeautifulSoup constructor. The tree builder is responsible \"\n \"for understanding self-closing tags.\")\n\n if 'isHTML' in kwargs:\n del kwargs['isHTML']\n warnings.warn(\n \"BS4 does not respect the isHTML argument to the \"\n \"BeautifulSoup constructor. Suggest you use \"\n \"features='lxml' for HTML and features='lxml-xml' for \"\n \"XML.\")\n\n def deprecated_argument(old_name, new_name):\n if old_name in kwargs:\n warnings.warn(\n 'The \"%s\" argument to the BeautifulSoup constructor '\n 'has been renamed to \"%s.\"' % (old_name, new_name))\n value = kwargs[old_name]\n del kwargs[old_name]\n return value\n return None\n\n parse_only = parse_only or deprecated_argument(\n \"parseOnlyThese\", \"parse_only\")\n\n from_encoding = from_encoding or deprecated_argument(\n \"fromEncoding\", \"from_encoding\")\n if from_encoding and isinstance(markup, str):\n warnings.warn(\"You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.\")\n from_encoding = None\n\n if len(kwargs) > 0:\n arg = list(kwargs.keys()).pop()\n raise TypeError(\n \"__init__() got an unexpected keyword argument '%s'\" % arg)\n\n if builder is None:\n original_features = features\n if isinstance(features, str):\n features = [features]\n if features is None or len(features) == 0:\n features = self.DEFAULT_BUILDER_FEATURES\n builder_class = builder_registry.lookup(*features)\n if builder_class is None:\n raise FeatureNotFound(\n \"Couldn't find a tree builder with the features you \"\n \"requested: %s. 
Do you need to install a parser library?\"\n % \",\".join(features))\n builder = builder_class()\n if not (original_features == builder.NAME or\n original_features in builder.ALTERNATE_NAMES):\n if builder.is_xml:\n markup_type = \"XML\"\n else:\n markup_type = \"HTML\"\n warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(\n parser=builder.NAME,\n markup_type=markup_type))\n\n self.builder = builder\n self.is_xml = builder.is_xml\n self.builder.soup = self\n\n self.parse_only = parse_only\n\n if hasattr(markup, 'read'): # It's a file-type object.\n markup = markup.read()\n elif len(markup) <= 256 and (\n (isinstance(markup, bytes) and not b'<' in markup)\n or (isinstance(markup, str) and not u'<' in markup)\n ):\n # Print out warnings for a couple beginner problems\n # involving passing non-markup to Beautiful Soup.\n # Beautiful Soup will still parse the input as markup,\n # just in case that's what the user really wants.\n if (isinstance(markup, str)\n and not os.path.supports_unicode_filenames):\n possible_filename = markup.encode(\"utf8\")\n else:\n possible_filename = markup\n is_file = False\n try:\n is_file = os.path.exists(possible_filename)\n except Exception as e:\n # This is almost certainly a problem involving\n # characters not valid in filenames on this\n # system. Just let it go.\n pass\n if is_file:\n if isinstance(markup, str):\n markup = markup.encode(\"utf8\")\n warnings.warn(\n '\"%s\" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)\n if markup[:5] == \"http:\" or markup[:6] == \"https:\":\n # TODO: This is ugly but I couldn't get it to work in\n # Python 3 otherwise.\n if ((isinstance(markup, bytes) and not b' ' in markup)\n or (isinstance(markup, str) and not ' ' in markup)):\n if isinstance(markup, str):\n markup = markup.encode(\"utf8\")\n warnings.warn(\n '\"%s\" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' 
% markup)\n\n for (self.markup, self.original_encoding, self.declared_html_encoding,\n self.contains_replacement_characters) in (\n self.builder.prepare_markup(\n markup, from_encoding, exclude_encodings=exclude_encodings)):\n self.reset()\n try:\n self._feed()\n break\n except ParserRejectedMarkup:\n pass\n\n # Clear out the markup and remove the builder's circular\n # reference to this object.\n self.markup = None\n self.builder.soup = None\n\n def __copy__(self):\n return type(self)(self.encode(), builder=self.builder)\n\n def __getstate__(self):\n # Frequently a tree builder can't be pickled.\n d = dict(self.__dict__)\n if 'builder' in d and not self.builder.picklable:\n del d['builder']\n return d\n\n def _feed(self):\n # Convert the document to Unicode.\n self.builder.reset()\n\n self.builder.feed(self.markup)\n # Close out any unfinished strings and close all the open tags.\n self.endData()\n while self.currentTag.name != self.ROOT_TAG_NAME:\n self.popTag()\n\n def reset(self):\n Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)\n self.hidden = 1\n self.builder.reset()\n self.current_data = []\n self.currentTag = None\n self.tagStack = []\n self.preserve_whitespace_tag_stack = []\n self.pushTag(self)\n\n def new_tag(self, name, namespace=None, nsprefix=None, **attrs):\n \"\"\"Create a new tag associated with this soup.\"\"\"\n return Tag(None, self.builder, name, namespace, nsprefix, attrs)\n\n def new_string(self, s, subclass=NavigableString):\n \"\"\"Create a new NavigableString associated with this soup.\"\"\"\n return subclass(s)\n\n def insert_before(self, successor):\n raise NotImplementedError(\"BeautifulSoup objects don't support insert_before().\")\n\n def insert_after(self, successor):\n raise NotImplementedError(\"BeautifulSoup objects don't support insert_after().\")\n\n def popTag(self):\n tag = self.tagStack.pop()\n if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:\n self.preserve_whitespace_tag_stack.pop()\n #print \"Pop\", tag.name\n if self.tagStack:\n self.currentTag = self.tagStack[-1]\n return self.currentTag\n\n def pushTag(self, tag):\n #print \"Push\", tag.name\n if self.currentTag:\n self.currentTag.contents.append(tag)\n self.tagStack.append(tag)\n self.currentTag = self.tagStack[-1]\n if tag.name in self.builder.preserve_whitespace_tags:\n self.preserve_whitespace_tag_stack.append(tag)\n\n def endData(self, containerClass=NavigableString):\n if self.current_data:\n current_data = ''.join(self.current_data)\n # If whitespace is not preserved, and this string contains\n # nothing but ASCII spaces, replace it with a single space\n # or newline.\n if not self.preserve_whitespace_tag_stack:\n strippable = True\n for i in current_data:\n if i not in self.ASCII_SPACES:\n strippable = False\n break\n if strippable:\n if '\\n' in current_data:\n current_data = '\\n'\n else:\n current_data = ' '\n\n # Reset the data collector.\n self.current_data = []\n\n # Should we add this string to the tree at all?\n if self.parse_only and len(self.tagStack) <= 1 and \\\n (not self.parse_only.text or \\\n not self.parse_only.search(current_data)):\n return\n\n o = containerClass(current_data)\n self.object_was_parsed(o)\n\n def object_was_parsed(self, o, parent=None, most_recent_element=None):\n \"\"\"Add an object to the parse tree.\"\"\"\n parent = parent or self.currentTag\n previous_element = most_recent_element or self._most_recent_element\n\n next_element = previous_sibling = next_sibling = None\n if isinstance(o, Tag):\n 
next_element = o.next_element\n next_sibling = o.next_sibling\n previous_sibling = o.previous_sibling\n if not previous_element:\n previous_element = o.previous_element\n\n o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)\n\n self._most_recent_element = o\n parent.contents.append(o)\n\n if parent.next_sibling:\n # This node is being inserted into an element that has\n # already been parsed. Deal with any dangling references.\n index = parent.contents.index(o)\n if index == 0:\n previous_element = parent\n previous_sibling = None\n else:\n previous_element = previous_sibling = parent.contents[index-1]\n if index == len(parent.contents)-1:\n next_element = parent.next_sibling\n next_sibling = None\n else:\n next_element = next_sibling = parent.contents[index+1]\n\n o.previous_element = previous_element\n if previous_element:\n previous_element.next_element = o\n o.next_element = next_element\n if next_element:\n next_element.previous_element = o\n o.next_sibling = next_sibling\n if next_sibling:\n next_sibling.previous_sibling = o\n o.previous_sibling = previous_sibling\n if previous_sibling:\n previous_sibling.next_sibling = o\n\n def _popToTag(self, name, nsprefix=None, inclusivePop=True):\n \"\"\"Pops the tag stack up to and including the most recent\n instance of the given tag. If inclusivePop is false, pops the tag\n stack up to but *not* including the most recent instqance of\n the given tag.\"\"\"\n #print \"Popping to %s\" % name\n if name == self.ROOT_TAG_NAME:\n # The BeautifulSoup object itself can never be popped.\n return\n\n most_recently_popped = None\n\n stack_size = len(self.tagStack)\n for i in range(stack_size - 1, 0, -1):\n t = self.tagStack[i]\n if (name == t.name and nsprefix == t.prefix):\n if inclusivePop:\n most_recently_popped = self.popTag()\n break\n most_recently_popped = self.popTag()\n\n return most_recently_popped\n\n def handle_starttag(self, name, namespace, nsprefix, attrs):\n \"\"\"Push a start tag on to the stack.\n\n If this method returns None, the tag was rejected by the\n SoupStrainer. You should proceed as if the tag had not occured\n in the document. 
For instance, if this was a self-closing tag,\n don't call handle_endtag.\n \"\"\"\n\n # print \"Start tag %s: %s\" % (name, attrs)\n self.endData()\n\n if (self.parse_only and len(self.tagStack) <= 1\n and (self.parse_only.text\n or not self.parse_only.search_tag(name, attrs))):\n return None\n\n tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,\n self.currentTag, self._most_recent_element)\n if tag is None:\n return tag\n if self._most_recent_element:\n self._most_recent_element.next_element = tag\n self._most_recent_element = tag\n self.pushTag(tag)\n return tag\n\n def handle_endtag(self, name, nsprefix=None):\n #print \"End tag: \" + name\n self.endData()\n self._popToTag(name, nsprefix)\n\n def handle_data(self, data):\n self.current_data.append(data)\n\n def decode(self, pretty_print=False,\n eventual_encoding=DEFAULT_OUTPUT_ENCODING,\n formatter=\"minimal\", indent_chars=\" \"):\n \"\"\"Returns a string or Unicode representation of this document.\n To get Unicode, pass None for encoding.\"\"\"\n\n if self.is_xml:\n # Print the XML declaration\n encoding_part = ''\n if eventual_encoding != None:\n encoding_part = ' encoding=\"%s\"' % eventual_encoding\n prefix = '<?xml version=\"1.0\"%s?>\\n' % encoding_part\n else:\n prefix = ''\n if not pretty_print:\n indent_level = None\n else:\n indent_level = 0\n return prefix + super(BeautifulSoup, self).decode(\n indent_level, eventual_encoding, formatter, indent_chars)\n\n def decodexml(self, indent_level=0, eventual_encoding=DEFAULT_OUTPUT_ENCODING,\n formatter=\"minimal\", indent_chars=\" \"):\n \"\"\"Returns a string or Unicode representation of this document.\n as pretty printed xml\"\"\"\n\n # generate a correct xml header declaration\n encoding_part = ''\n if eventual_encoding != None:\n encoding_part = ' encoding=\"%s\"' % eventual_encoding\n prefix = '<?xml version=\"1.0\"%s?>\\n' % encoding_part\n # remove any existing xml header pi since its encoding may now be incorrect\n # before adding in new xml header pi with the proper specified encoding\n newsource = super(BeautifulSoup, self).decodexml(\n indent_level, eventual_encoding, formatter, indent_chars)\n if newsource.startswith('<?xml'):\n newsource = _remove_xml_header(newsource)\n return prefix + newsource\n\n def serialize_xhtml(self, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter=\"minimal\"):\n \"\"\"Returns a string or Unicode representation of this document\n serialized as xhtml\"\"\"\n\n # generate a correct xml header declaration\n encoding_part = ''\n if eventual_encoding != None:\n encoding_part = ' encoding=\"%s\"' % eventual_encoding\n prefix = '<?xml version=\"1.0\"%s?>\\n' % encoding_part\n newsource = super(BeautifulSoup, self).serialize_xhtml(eventual_encoding, formatter)\n # remove any existing xml header declaration since its encoding may now be incorrect\n # before adding in new xml header declaration with the proper specified encoding\n if newsource.startswith('<?xml'):\n newsource = _remove_xml_header(newsource)\n return prefix + newsource\n\n def prettyprint_xhtml(self, indent_level=0, eventual_encoding=DEFAULT_OUTPUT_ENCODING,\n formatter=\"minimal\", indent_chars=\" \"):\n \"\"\"Returns a string or Unicode representation of this document\n pretty printed as xhtml\"\"\"\n\n # generate a correct xml header declaration\n encoding_part = ''\n if eventual_encoding != None:\n encoding_part = ' encoding=\"%s\"' % eventual_encoding\n prefix = '<?xml version=\"1.0\"%s?>\\n' % encoding_part\n newsource = super(BeautifulSoup, self).prettyprint_xhtml(indent_level, eventual_encoding, formatter, indent_chars)\n # remove any existing xml header pi since its encoding may now be incorrect\n # before adding in new xml header pi with the proper specified encoding\n if newsource.startswith('<?xml'):\n newsource = _remove_xml_header(newsource)\n return prefix + newsource\n\ndef BubbleSort(a, n):\n for i in range(n-1):\n for j in range(n-1-i):\n if a[j] > a[j+1]:\n a[j], a[j+1] = a[j+1], a[j]\n\n return a\n\nN = int(input())\nnum = input()\nC = [0]*10\n\nfor i in num:\n C[int(i)] += 1\n\nres = BubbleSort(C, 10)\nress = res[-1]\n\nfor i in range(9, -1, -1):\n if C[i] == ress:\n print(i, ress)\n\n \n ","repo_name":"junchicode/TIL","sub_path":"Algo_week_SWEA&Class/week1/0809/4834.py","file_name":"4834.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15456085815","text":"import sys\nimport traceback\nimport urllib2\n\nfrom functools import wraps\n\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nfrom ubuntu_sso import NO_OP, utils\nfrom ubuntu_sso.keyring import Keyring\nfrom ubuntu_sso.logger import 
setup_logging\n\nlogger = setup_logging('ubuntu_sso.credentials')\n\n\nAPP_NAME_KEY = 'app_name'\nTC_URL_KEY = 'tc_url'\nHELP_TEXT_KEY = 'help_text'\nWINDOW_ID_KEY = 'window_id'\nPING_URL_KEY = 'ping_url'\nUI_MODULE_KEY = 'ui_module'\nUI_CLASS_KEY = 'ui_class'\nSUCCESS_CB_KEY = 'success_cb'\nERROR_CB_KEY = 'error_cb'\nDENIAL_CB_KEY = 'denial_cb'\nERROR_KEY = 'error_message'\nERROR_DETAIL_KEY = 'detailed_error'\n\n\ndef handle_exceptions(msg):\n \"\"\"Handle exceptions using 'msg' as error message.\"\"\"\n\n def middle(f):\n \"\"\"Decorate 'f' to catch all errors.\"\"\"\n\n @wraps(f)\n def inner(self, *a, **kw):\n \"\"\"Call 'f' within a try-except block.\n\n If any exception occurs, self.error_cb is called and the exception\n is logged.\n \"\"\"\n result = None\n try:\n result = f(self, *a, **kw)\n except: # pylint: disable=W0702\n logger.exception('%s (app_name: %s): %s.',\n f.__name__, self.app_name, msg)\n logger.error('%s (app_name: %s): Calling error_cb at %r.',\n f.__name__, self.app_name, self.error_cb)\n error_dict = {ERROR_KEY: msg,\n ERROR_DETAIL_KEY: traceback.format_exc()}\n self.error_cb(error_dict)\n return result\n\n return inner\n\n return middle\n\n\ndef handle_failures(msg):\n \"\"\"Handle failures using 'msg' as error message.\"\"\"\n\n def middle(f):\n \"\"\"Decorate 'f' to catch all errors.\"\"\"\n\n @wraps(f)\n @inlineCallbacks\n def inner(self, *a, **kw):\n \"\"\"Call 'f' within a try-except block.\n\n If any exception occurs, self.error_cb is called and the exception\n is logged.\n \"\"\"\n result = None\n try:\n result = yield f(self, *a, **kw)\n except Exception: # pylint: disable=W0703\n logger.exception('%s (app_name: %s): %s.',\n f.__name__, self.app_name, msg)\n logger.error('%s (app_name: %s): Calling error_cb at %r.',\n f.__name__, self.app_name, self.error_cb)\n error_dict = {ERROR_KEY: msg,\n ERROR_DETAIL_KEY: traceback.format_exc()}\n self.error_cb(error_dict)\n returnValue(result)\n\n return inner\n\n return middle\n\n\nclass Credentials(object):\n \"\"\"Credentials management gateway.\"\"\"\n\n def __init__(self, app_name, tc_url=None, help_text='',\n window_id=0, ping_url=None,\n ui_module='ubuntu_sso.gtk.gui', ui_class='UbuntuSSOClientGUI',\n success_cb=NO_OP, error_cb=NO_OP, denial_cb=NO_OP):\n \"\"\"Return a Credentials management object.\n\n 'app_name' is the application name to be displayed in the GUI.\n\n 'tc_url' is the URL pointing to Terms & Conditions. If None, no\n TOS agreement will be displayed.\n\n 'help_text' is an explanatory text for the end-users, will be shown\n below the headers.\n\n 'window_id' is the id of the window which will be set as a parent of\n the GUI. If 0, no parent will be set.\n\n 'ping_url' is the url that will be pinged when a user registers/logins\n successfully. The user email will be attached to 'ping_url'.\n\n 'success_cb' will be called when the credentials were retrieved\n successfully. Two params will be passed: the app_name and the\n credentials per se. The credentials is a dictionary of the form:\n\n {'token': <value>,\n 'token_secret': <value>,\n 'consumer_key': <value>,\n 'consumer_secret': <value>,\n 'name': <value>}\n\n 'error_cb' will be called when the credentials retrieval failed. Two\n params will be passed: the app_name, and an error dict with 2 keys:\n the error message (user friendly, not translatable so far), and\n the detailed error (usually the traceback).\n\n 'denial_cb' will be called when the user denied the credentials to the\n caller. 
A single param is passed: the app_name.\n\n \"\"\"\n self.app_name = app_name\n self.help_text = help_text\n self.window_id = window_id\n self.ping_url = ping_url\n self.tc_url = tc_url\n self.ui_module = ui_module\n self.ui_class = ui_class\n self._success_cb = success_cb\n self._error_cb = error_cb\n self.denial_cb = denial_cb\n self.inner = None # will hold the GUI or SSOLoginRoot instance\n\n @handle_failures(msg='Problem while retrieving credentials')\n @inlineCallbacks\n def _login_success_cb(self, app_name, email):\n \"\"\"Store credentials when the login/registration succeeded.\n\n Also, open self.ping_url/email to notify about this new token. If any\n error occur, self.error_cb is called. Otherwise, self.success_cb is\n called.\n\n Return 0 on success, and a non-zero value (or None) on error.\n\n \"\"\"\n logger.info('Login/registration was successful for app %r, email %r',\n app_name, email)\n creds = yield self.find_credentials()\n if creds is not None:\n assert len(creds) > 0, 'Creds are empty! This should not happen'\n # ping a server with the credentials if we were requested to\n if self.ping_url is not None:\n status = yield self._ping_url(app_name, email, creds)\n if status is None:\n yield self.clear_credentials()\n return\n\n self.success_cb(creds)\n returnValue(0)\n\n def _auth_denial_cb(self, app_name):\n \"\"\"The user decided not to allow the registration or login.\"\"\"\n logger.warning('Login/registration was denied to app %r', app_name)\n self.denial_cb(app_name)\n\n @handle_failures(msg='Problem opening the ping_url')\n @inlineCallbacks\n def _ping_url(self, app_name, email, credentials):\n \"\"\"Ping the self.ping_url with the email attached.\n\n Sign the request with the user credentials. The self.ping_url must be\n defined if this method is being called.\n\n \"\"\"\n logger.info('Pinging server for app_name %r, ping_url: %r, '\n 'email %r.', app_name, self.ping_url, email)\n try:\n url = self.ping_url.format(email=email)\n except IndexError: # tuple index out of range\n url = self.ping_url.format(email) # format the first substitution\n\n if url == self.ping_url:\n logger.debug('Original url (%r) could not be formatted, '\n 'appending email (%r).', self.ping_url, email)\n url = self.ping_url + email\n\n headers = utils.oauth_headers(url, credentials)\n request = urllib2.Request(url, headers=headers)\n logger.debug('Opening the url %r with urllib2.urlopen.',\n request.get_full_url())\n # This code is blocking, we should change this.\n # I've tried with deferToThread an twisted.web.client.getPage\n # but the returned deferred will never be fired (nataliabidart).\n response = urllib2.urlopen(request)\n logger.debug('Url opened. 
Response: %s.', response.code)\n returnValue(response)\n\n @handle_exceptions(msg='Problem opening the Ubuntu SSO user interface')\n def _show_ui(self, login_only):\n \"\"\"Shows the UI, connect outcome signals.\"\"\"\n\n __import__(self.ui_module)\n gui = sys.modules[self.ui_module]\n\n self.inner = getattr(gui, self.ui_class)(app_name=self.app_name,\n tc_url=self.tc_url, help_text=self.help_text,\n window_id=self.window_id, login_only=login_only)\n\n self.inner.login_success_callback = self._login_success_cb\n self.inner.registration_success_callback = self._login_success_cb\n self.inner.user_cancellation_callback = self._auth_denial_cb\n\n @handle_exceptions(msg='Problem logging with email and password.')\n def _do_login(self, email, password):\n \"\"\"Login using email/password, connect outcome signals.\"\"\"\n from ubuntu_sso.main import SSOLoginRoot\n self.inner = SSOLoginRoot()\n self.inner.login(app_name=self.app_name, email=email,\n password=password,\n result_cb=self._login_success_cb,\n error_cb=self._error_cb,\n not_validated_cb=self._error_cb)\n\n @handle_failures(msg='Problem while retrieving credentials')\n @inlineCallbacks\n def _login_or_register(self, login_only, email=None, password=None):\n \"\"\"Get credentials if found else prompt the GUI.\"\"\"\n logger.info(\"_login_or_register: login_only=%r email=%r.\",\n login_only, email)\n token = yield self.find_credentials()\n if token is not None and len(token) > 0:\n self.success_cb(token)\n elif token == {}:\n if email and password:\n self._do_login(email, password)\n else:\n self._show_ui(login_only)\n else:\n # something went wrong with find_credentials, already handled.\n logger.info('_login_or_register: call to \"find_credentials\" went '\n 'wrong, and was already handled.')\n\n def error_cb(self, error_dict):\n \"\"\"Handle error and notify the caller.\"\"\"\n logger.error('Calling error callback at %r (error is %r).',\n self._error_cb, error_dict)\n self._error_cb(self.app_name, error_dict)\n\n def success_cb(self, creds):\n \"\"\"Handle success and notify the caller.\"\"\"\n logger.debug('Calling success callback at %r.', self._success_cb)\n self._success_cb(self.app_name, creds)\n\n @inlineCallbacks\n def find_credentials(self):\n \"\"\"Get the credentials for 'self.app_name'. Return {} if not there.\"\"\"\n creds = yield Keyring().get_credentials(self.app_name)\n logger.info('find_credentials: self.app_name %r, '\n 'result is {}? 
%s', self.app_name, creds is None)\n returnValue(creds if creds is not None else {})\n\n @inlineCallbacks\n def clear_credentials(self):\n \"\"\"Clear the credentials for 'self.app_name'.\"\"\"\n yield Keyring().delete_credentials(self.app_name)\n\n @inlineCallbacks\n def store_credentials(self, token):\n \"\"\"Store the credentials for 'self.app_name'.\"\"\"\n yield Keyring().set_credentials(self.app_name, token)\n\n def register(self):\n \"\"\"Get credentials if found else prompt the GUI to register.\"\"\"\n return self._login_or_register(login_only=False)\n\n def login(self):\n \"\"\"Get credentials if found else prompt the GUI to login.\"\"\"\n return self._login_or_register(login_only=True)\n\n def login_email_password(self, email, password):\n \"\"\"Get credentials if found else login using email and password.\"\"\"\n return self._login_or_register(login_only=True,\n email=email, password=password)\n","repo_name":"Alberto-Beralix/Beralix","sub_path":"i386-squashfs-root/usr/share/pyshared/ubuntu-sso-client/ubuntu_sso/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":11489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"16120663687","text":"from db_models.modelsv2 import ProjectControlLog, Projects\nfrom flask_restful import Resource, fields\nfrom modules.json_serializator import engine_encode\nfrom modules.db_helper import update_item\nimport json\n\n# PARAMS\nNAME = \"ProjectControlLogResource\"\nNAME_LIST = \"ProjectControlLogListResource\"\nENTITY_NAME = \"ProjectControlLog\"\nMODEL = ProjectControlLog\nROUTE = \"/v2/projectControlLog\"\nEND_POINT = \"v2-project-control-log\"\nROUTE_LIST = \"/v2/projectControlLog\"\nEND_POINT_LIST = \"v2-project-control-log-list\"\n# NESTED SCHEMA FIELDS\n\n# OUTPUT SCHEMA\nOUTPUT_FIELDS = {\n 'id': fields.Integer,\n 'data': fields.String,\n 'project_id': fields.Integer\n}\n\n\ndef post_data_converter(json_data):\n json_data = json.loads(json_data)\n return {\n 'project_id': json_data['projectId'],\n 'data': engine_encode(json_data['data'])\n }\n\ndef after_post_action(entity, json_data):\n try:\n project = update_item(Projects, {'control_log_state_id': json_data[\"state_id\"]}, entity.project_id)\n if project is None:\n raise Exception('Unable to update project with id:{}'.format(entity.project_id))\n except Exception as e:\n raise e\n","repo_name":"vyadzmak/Landau.X.Api","sub_path":"resv2/project_control_log_resources.py","file_name":"project_control_log_resources.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74368168265","text":"import sys\n\n\ndef three_sum_closest(nums, target):\n nums = sorted(nums)\n traversed_over = set()\n closest = None\n for i in range(len(nums)):\n if nums[i] in traversed_over:\n pass\n else:\n current = find_closest(nums, i + 1, len(nums) - 1, nums[i], target)\n if closest is None:\n closest = current\n elif current is not None and abs(target - current) < abs(target - closest):\n closest = current\n if closest == target:\n return closest\n traversed_over.add(nums[i])\n return closest\n\n\ndef find_closest(nums, l, r, num, target):\n closest = None\n while l < len(nums) and r >= 0 and l < r:\n current = nums[l] + nums[r] + num\n if current == target:\n return current\n elif closest is None or abs(target - current) < abs(target - closest):\n closest = current\n\n if current < target:\n l += 1\n else:\n r -= 1\n\n return 
closest\n\n\nprint(three_sum_closest([-1, 2, 1, -4], 1))\n\nprint(three_sum_closest([0, 0, 0], 1))\n\nprint(three_sum_closest([1,1,-1,-1,3], -1))\n\nprint(three_sum_closest([-55,-24,-18,-11,-7,-3,4,5,6,9,11,23,33], 0))\n","repo_name":"Royal4224/LeetCode-Practice","sub_path":"16. three_sum_closest.py","file_name":"16. three_sum_closest.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74897531466","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Importing packages\n\nimport matplotlib.pyplot as plt\n\n\n# Functions\n# Taken input to be 0 in both cases (x = 0)\n\ndef first(x,y,T):\n return -y/T\ndef second(x,y,y_d):\n return y_d\ndef second1(x,y,y_d):\n return -2*si*w*y_d - w*w*y\n\n\n############################ FIRST ORDER ###########################################\n\nt=[0]\ny=[10] # y\nh=0.05 #step-size\ntf=30 #final time\n\n# Count number of iterations using step size h\nn = (int)(tf/h)\n# Iterate for number of iterations\nt0=0\n\nT = input(\"Enter value of T: \")\nT = float(T)\n\nfor i in range(1,n):\n \"Apply Runge Kutta Formulas to find next value of y\"\n k1 = first(t0,y[-1],T)\n k2 = first(t0+0.5*h, y[-1]+0.5*h*k1,T)\n k3 = first(t0+0.5*h, y[-1]+0.5*h*k2,T)\n k4 = first(t0+h, y[-1]+h*k3,T)\n\n # Update next value of theta\n y.append(y[-1] + (1/6)*(k1 + 2 * k2 + 2 * k3 + k4)*h)\n # Update next value of t\n t0 = t0 + h\n t.append(t0)\nfig=plt.figure(figsize=(10,7))\nplt.plot(t,y)\nplt.title('y vs t')\n\n\n\n\n############################ SECOND ORDER ##########################################\n\nt=[0]\ny=[10] # y\ny_d=[10]\nh=0.05 #step-size\ntf=30 #final time\n# Count number of iterations using step size h\nn = (int)(tf/h)\n# Iterate for number of iterations\nt0=0\n\nsi = input(\"Enter value of damping constant: \")\nsi = float(si)\nw = input(\"Enter value of natural frequency: \")\nw = float(w)\n\nfor i in range(1,n):\n \"Apply Runge Kutta Formulas to find next value of y\"\n k1 = second(t0,y[-1],y_d[-1])\n k1_d = second1(0,y[-1],y_d[-1])\n k2 = second(t0+0.5*h, y[-1]+0.5*h*k1,y_d[-1]+0.5*h*k1_d)\n k2_d = second1(t0+0.5*h, y[-1]+0.5*h*k1,y_d[-1]+0.5*h*k1_d)\n k3 = second(t0+0.5*h, y[-1]+0.5*h*k2,y_d[-1]+0.5*h*k2_d)\n k3_d = second1(t0+0.5*h, y[-1]+0.5*h*k2,y_d[-1]+0.5*h*k2_d)\n k4 = second(t0+h, y[-1]+h*k3,y_d[-1]+h*k3_d)\n k4_d = second1(t0+h, y[-1]+h*k3,y_d[-1]+h*k3_d)\n\n# Update next value of theta\n y.append(y[-1] + (1/6)*(k1 + 2 * k2 + 2 * k3 + k4)*h)\n y_d.append(y_d[-1] + (1/6)*(k1_d+2*k2_d+2*k3_d+k4_d)*h)\n # Update next value of t\n t0 = t0 + h\n t.append(t0)\n\nfig,a = plt.subplots(1,2,figsize=(18,7))\na[0].plot(t,y)\na[0].set_title('y vs t')\na[1].plot(t,y_d)\na[1].set_title('y_dot vs t')\nplt.show()\n\n################################# END ##############################################\n","repo_name":"adityarg/Aerospace","sub_path":"AE322A/Assig 2/Ass2_Q3.py","file_name":"Ass2_Q3.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39211838964","text":"from sklearn.decomposition import PCA\nimport colorsys\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FormatStrFormatter\nimport random\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom PIL import Image\nimport cv2\nimport utils\nimport os\nfrom collections import defaultdict\n\n\n# high level function used for making visualizations by passing in a command line arg\ndef 
create_visualizations(all_models, visualize_params, save_vis, model_output_fp):\n\n figs = set([o.lower() for o in visualize_params.split(',')])\n\n if 'bovw_pca' in figs:\n visualize_BOVW_PCA(all_models[0][0], save=save_vis, model_fp=model_output_fp)\n\n if 'bovw_examples' in figs:\n visualize_BOVW_examples(all_models[0][0], save=save_vis, model_fp=model_output_fp)\n \n if 'bovw_keypoints' in figs:\n visualize_BOVW_keypoints(all_models[0][0], save=save_vis, model_fp=model_output_fp)\n\n if 'n_train_examples' in figs:\n visualize_parameter(all_models, 'n_train_examples', save=save_vis, model_fp=model_output_fp)\n\n if 'n_clusters' in figs:\n visualize_parameter(all_models, ('cluster_model_params', 'n_clusters'), xscale='log', save=save_vis, model_fp=model_output_fp)\n\n if 'spatial_pyramid_levels' in figs:\n visualize_parameter(all_models, 'spatial_pyramid_levels', save=save_vis, model_fp=model_output_fp)\n\n if 'feature_selection_threshold' in figs:\n visualize_parameter(all_models, ('feature_selection_params', 'threshold'), save=save_vis, model_fp=model_output_fp)\n\n if 'descriptor_extractor_threshold' in figs:\n visualize_parameter(all_models, ('descriptor_extractor_params', 'threshold'), xscale='log', save=save_vis, model_fp=model_output_fp)\n\n\n# visualizes BOVW clusters in 3 dimensions (using PCA)\ndef visualize_BOVW_PCA(model, save=False, show=True, model_fp=None):\n\n BOVW = model.BOVW\n descriptors = BOVW.X\n clusters = BOVW.predict(descriptors)\n\n reduced_descriptors = PCA(n_components=3).fit_transform(descriptors)\n label_to_color = get_N_HexCol(BOVW.n_clusters)\n\n fig = plt.figure()\n fig.suptitle('Bag of Visual Words clusters (PCA)', fontsize=20)\n ax = Axes3D(fig)\n for c_i, d_i, pca_v in zip(clusters, descriptors, reduced_descriptors):\n\n ax.scatter(*pca_v, c=label_to_color[c_i])\n\n if save:\n fig_fp = get_fig_filepath('bovw_pca', model_fp)\n save_figure(fig, fig_fp)\n if show:\n plt.show()\n\n\n# extracts visual word patches and displays them in their clusters\ndef visualize_BOVW_examples(model, save=False, show=True, model_fp=None):\n \n BOVW = model.BOVW\n keypoints = BOVW.kp\n clusters = BOVW.clusters\n ims = BOVW.ims\n\n d = defaultdict(list)\n for im, kps, cs in zip(ims, keypoints, clusters):\n for kp, c in zip(kps, cs):\n (x_min, x_max), (y_min, y_max) = get_kp_bbox(kp)\n kp_im = Image.fromarray(im[y_min:y_max, x_min:x_max])\n d[c].append(kp_im)\n \n n_rows = n_cols = 8\n fig, axarr = plt.subplots(n_rows, n_cols)\n fig.suptitle('Bag of Visual Word samples', fontsize=20)\n fig.set_size_inches(2+n_cols, 0.5+n_rows)\n for r in range(n_rows):\n for c in range(n_cols):\n try:\n VW_im = d[c][r]\n axarr[r, c].imshow(VW_im)\n axarr[r, c].tick_params(labelsize=8, which='major')\n axarr[r, c].minorticks_off()\n except (IndexError, KeyError):\n pass\n # label columns\n for c, ax in enumerate(axarr[0]):\n ax.set_title('cluster #%d' % (c+1), pad=20, fontdict={'fontsize':8, 'fontweight':'semibold'})\n\n plt.subplots_adjust(left=0.05, right=0.95, top=0.85, bottom=0.05, wspace=1, hspace=0.5)\n\n if save:\n fig_fp = get_fig_filepath('bovw_examples', model_fp)\n save_figure(fig, fig_fp)\n if show:\n plt.show()\n\n\ndef get_kp_bbox(kp):\n x, y = kp.pt\n r = kp.size / 2\n x_min = int(x - r)\n x_max = int(x + r)\n y_min = int(y - r)\n y_max = int(y + r)\n return (x_min, x_max), (y_min, y_max)\n\n\n# draws keypoints over small sample of positive and negative images\ndef visualize_BOVW_keypoints(model, save=False, show=True, model_fp=None):\n \n BOVW = model.BOVW\n keypoints = 
BOVW.kp\n ims = BOVW.ims\n im_labels = BOVW.im_labels\n\n d = defaultdict(list)\n sorted_labels = sorted(set(im_labels))\n for im, im_label, kps in zip(ims, im_labels, keypoints):\n cv2_kps = utils.convert_custom_kps_to_cv2_kps(kps)\n kp_im = Image.fromarray(cv2.drawKeypoints(im, cv2_kps, None, color=(0,255,0), flags=4))\n d[im_label].append(kp_im)\n\n n_rows, n_cols = 5, len(sorted_labels)\n fig, axarr = plt.subplots(n_rows, n_cols)\n fig.suptitle('Snippet Keypoint Visualization', fontsize=20)\n fig.set_size_inches(2+n_cols*2, 2+n_rows*1.25)\n for r in range(n_rows):\n for c in range(n_cols):\n try:\n label = sorted_labels[c]\n kp_im = d[label][r]\n axarr[r, c].imshow(kp_im)\n except:\n pass # Not enough samples with this label\n \n # label columns\n for ax, label in zip(axarr[0], sorted_labels):\n ax.set_title(label, pad=20, fontdict={'fontsize':8, 'fontweight':'semibold'})\n\n plt.subplots_adjust(left=0.15, right=0.85, top=0.85, bottom=0.1, wspace=0.4, hspace=0.4)\n\n if save:\n fig_fp = get_fig_filepath('bovw_keypoints', model_fp)\n save_figure(fig, fig_fp)\n if show:\n plt.show()\n\n\n# graphs visualization of cross_val_err and test_acc with changes in a parameter\ndef visualize_parameter(results, parameter, save=False, show=True, model_fp=None, **kwargs):\n x = []\n errs = []\n accs = []\n for _, config, val_err, test_acc in results:\n if type(parameter) == tuple:\n arg, subarg = parameter\n x.append(config[arg][subarg])\n else:\n x.append(config[parameter])\n errs.append(val_err)\n accs.append(test_acc)\n \n fig, ax = plt.subplots()\n ax.plot(x, accs, marker='^', color='red', label='test_acc')\n ax.plot(x, errs, marker='s', color='blue', label='cross_val_err')\n ax.legend()\n\n if type(parameter) == tuple:\n parameter = '_'.join(parameter[0].split('_')[:-1] + [parameter[1]])\n ax.set_title('Cross-Val Error, Test Accuracy vs %s' % parameter, pad=15)\n\n ax.set_xticks(x)\n ax.set_yticks([0, 1], minor=True)\n ax.set_xlabel(parameter)\n if 'xscale' in kwargs:\n ax.set_xscale(kwargs['xscale'])\n ax.xaxis.set_major_formatter(FormatStrFormatter('%g'))\n #ax.xaxis.set_minor_formatter(FormatStrFormatter('%g'))\n\n # label points\n for x, (err, acc) in zip(x, zip(errs, accs)):\n ax.annotate('(%d, %g)' % (x, err), (x,err), xytext=(0,9), textcoords='offset points', ha='center')\n ax.annotate('(%d, %g)' % (x, acc), (x,acc), xytext=(0,-16), textcoords='offset points', ha='center')\n\n if save:\n fig_fp = get_fig_filepath(parameter, model_fp)\n save_figure(fig, fig_fp)\n if show:\n plt.show()\n\n\n# methods for saving graphics\n\ndef save_figure(fig, fig_fp):\n fig.savefig(fig_fp)\n\ndef get_fig_filepath(parameter, model_fp):\n model_dir = get_directory(model_fp)\n model_fn_base = remove_extension(get_filename(model_fp))\n\n fig_fn = model_fn_base + '.' 
+ parameter + '_vis.png'\n fig_fp = os.path.join(model_dir, fig_fn)\n return fig_fp\n\ndef get_filename(fp):\n return fp.split('/')[-1]\n\ndef remove_extension(fn):\n return \".\".join(fn.split('.')[:-1])\n\ndef get_directory(fp):\n return \"/\".join(fp.split('/')[:-1])\n\n\n# adapted from https://stackoverflow.com/a/47194111\ndef get_N_HexCol(N):\n HSV_tuples = [(x * 1.0 / N, 0.75 + y * 0.25 / N, 0.75 + z * 0.25 / N) for x,y,z in zip(range(N), sorted(list(range(N)), key=lambda x: random.random()), sorted(list(range(N)), key=lambda x: random.random()))]\n hex_out = []\n for rgb in HSV_tuples:\n rgb = map(lambda x: int(x * 255), colorsys.hsv_to_rgb(*rgb))\n hex_out.append('#%02x%02x%02x' % tuple(rgb))\n return hex_out\n\n","repo_name":"josephcappadona/automated-CV-analytics","sub_path":"src/model/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":8056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70903922826","text":"from random import shuffle, choice\nfrom itertools import product, repeat, chain\n\n\nCOLORS = ['red', 'yellow', 'green', 'blue']\nALL_COLORS = COLORS + ['black']\nNUMBERS = list(range(10)) + list(range(1, 10))\nSPECIAL_CARD_TYPES = ['skip', 'reverse', '+2']\nCOLOR_CARD_TYPES = NUMBERS + SPECIAL_CARD_TYPES * 2\nBLACK_CARD_TYPES = ['wildcard', '+4']\nCARD_TYPES = NUMBERS + SPECIAL_CARD_TYPES + BLACK_CARD_TYPES\n\n\nclass UnoCard:\n \"\"\"\n Represents a single Uno Card, given a valid color and card type.\n\n color: string\n card_type: string/int\n\n >>> card = UnoCard('red', 5)\n \"\"\"\n def __init__(self, color, card_type):\n self._validate(color, card_type)\n self.color = color\n self.card_type = card_type\n self.temp_color = None\n\n def __repr__(self):\n return '<UnoCard object: {} {}>'.format(self.color, self.card_type)\n\n def __str__(self):\n return '{}{}'.format(self.color_short, self.card_type_short)\n\n def __eq__(self, other):\n return self.color == other.color and self.card_type == other.card_type\n\n def _validate(self, color, card_type):\n \"\"\"\n Check the card is valid, raise exception if not.\n \"\"\"\n if color not in ALL_COLORS:\n raise ValueError('Invalid color')\n if color == 'black' and card_type not in BLACK_CARD_TYPES:\n raise ValueError('Invalid card type')\n if color != 'black' and card_type not in COLOR_CARD_TYPES:\n raise ValueError('Invalid card type')\n\n @property\n def color_short(self):\n return self.color[0].upper()\n\n @property\n def card_type_short(self):\n if self.card_type in ('skip', 'reverse', 'wildcard'):\n return self.card_type[0].upper()\n else:\n return self.card_type\n\n @property\n def _color(self):\n return self.temp_color if self.temp_color else self.color\n\n @property\n def temp_color(self):\n return self._temp_color\n\n @temp_color.setter\n def temp_color(self, color):\n if color is not None:\n if color not in COLORS:\n raise ValueError('Invalid color')\n self._temp_color = color\n\n def playable(self, other):\n \"\"\"\n Return True if the other card is playable on top of this card,\n otherwise return False\n \"\"\"\n return (\n self._color == other.color or\n self.card_type == other.card_type or\n other.color == 'black'\n )\n\n\nclass UnoPlayer:\n \"\"\"\n Represents a player in an Uno game. 
A player is created with a list of 7\n Uno cards.\n\n cards: list of 7 UnoCards\n player_id: int/str (default: None)\n\n >>> cards = [UnoCard('red', n) for n in range(7)]\n >>> player = UnoPlayer(cards)\n \"\"\"\n def __init__(self, cards, player_id=None):\n if len(cards) != 7:\n raise ValueError(\n 'Invalid player: must be initalised with 7 UnoCards'\n )\n if not all(isinstance(card, UnoCard) for card in cards):\n raise ValueError(\n 'Invalid player: cards must all be UnoCard objects'\n )\n self.hand = cards\n self.player_id = player_id\n\n def __repr__(self):\n if self.player_id is not None:\n return '<UnoPlayer object: player {}>'.format(self.player_id)\n else:\n return '<UnoPlayer object>'\n\n def __str__(self):\n if self.player_id is not None:\n return str(self.player_id)\n else:\n return repr(self)\n\n def can_play(self, current_card):\n \"\"\"\n Return True if the player has any playable cards (on top of the current\n card provided), otherwise return False\n \"\"\"\n return any(current_card.playable(card) for card in self.hand)\n\n\nclass UnoGame:\n \"\"\"\n Represents an Uno game.\n\n players: int\n random: bool (default: True)\n\n >>> game = UnoGame(5)\n \"\"\"\n def __init__(self, players, random=True):\n if not isinstance(players, int):\n raise ValueError('Invalid game: players must be integer')\n if not 2 <= players <= 15:\n raise ValueError('Invalid game: must be between 2 and 15 players')\n self.deck = self._create_deck(random)\n self.players = [\n UnoPlayer(self._deal_hand(), n) for n in range(players)\n ]\n self._player_cycle = ReversibleCycle(self.players)\n self._current_player = next(self._player_cycle)\n self._winner = None\n\n def __next__(self):\n \"\"\"\n Iteration sets the current player to the next player in the cycle.\n \"\"\"\n self._current_player = next(self._player_cycle)\n\n def _create_deck(self, random):\n \"\"\"\n Return a list of the complete set of Uno Cards. 
If random is True, the\n deck will be shuffled, otherwise will be unshuffled.\n \"\"\"\n color_cards = product(COLORS, COLOR_CARD_TYPES)\n black_cards = product(repeat('black', 4), BLACK_CARD_TYPES)\n all_cards = chain(color_cards, black_cards)\n deck = [UnoCard(color, card_type) for color, card_type in all_cards]\n if random:\n shuffle(deck)\n return deck\n else:\n return list(reversed(deck))\n\n def _deal_hand(self):\n \"\"\"\n Return a list of 7 cards from the top of the deck, and remove these\n from the deck.\n \"\"\"\n return [self.deck.pop() for i in range(7)]\n\n @property\n def current_card(self):\n return self.deck[-1]\n\n @property\n def is_active(self):\n return all(len(player.hand) > 0 for player in self.players)\n\n @property\n def current_player(self):\n return self._current_player\n\n @property\n def winner(self):\n return self._winner\n\n def play(self, player, card=None, new_color=None):\n \"\"\"\n Process the player playing a card.\n\n player: int representing player index number\n card: int representing index number of card in player's hand\n\n It must be player's turn, and if card is given, it must be playable.\n If card is not given (None), the player picks up a card from the deck.\n\n If game is over, raise an exception.\n \"\"\"\n if not isinstance(player, int):\n raise ValueError('Invalid player: should be the index number')\n if not 0 <= player < len(self.players):\n raise ValueError('Invalid player: index out of range')\n _player = self.players[player]\n if self.current_player != _player:\n raise ValueError('Invalid player: not their turn')\n if card is None:\n self._pick_up(_player, 1)\n next(self)\n return\n _card = _player.hand[card]\n if not self.current_card.playable(_card):\n raise ValueError(\n 'Invalid card: {} not playable on {}'.format(\n _card, self.current_card\n )\n )\n if _card.color == 'black':\n if new_color not in COLORS:\n raise ValueError(\n 'Invalid new_color: must be red, yellow, green or blue'\n )\n if not self.is_active:\n raise ValueError('Game is over')\n\n played_card = _player.hand.pop(card)\n self.deck.append(played_card)\n\n card_color = played_card.color\n card_type = played_card.card_type\n if card_color == 'black':\n self.current_card.temp_color = new_color\n if card_type == '+4':\n next(self)\n self._pick_up(self.current_player, 4)\n elif card_type == 'reverse':\n self._player_cycle.reverse()\n elif card_type == 'skip':\n next(self)\n elif card_type == '+2':\n next(self)\n self._pick_up(self.current_player, 2)\n\n if self.is_active:\n next(self)\n else:\n self._winner = _player\n self._print_winner()\n\n def _print_winner(self):\n \"\"\"\n Print the winner name if available, otherwise look up the index number.\n \"\"\"\n if self.winner.player_id:\n winner_name = self.winner.player_id\n else:\n winner_name = self.players.index(self.winner)\n print(\"Player {} wins!\".format(winner_name))\n\n def _pick_up(self, player, n):\n \"\"\"\n Take n cards from the bottom of the deck and add it to the player's\n hand.\n\n player: UnoPlayer\n n: int\n \"\"\"\n penalty_cards = [self.deck.pop(0) for i in range(n)]\n player.hand.extend(penalty_cards)\n\n\nclass ReversibleCycle:\n \"\"\"\n Represents an interface to an iterable which can be infinitely cycled (like\n itertools.cycle), and can be reversed.\n\n Starts at the first item (index 0), unless reversed before first iteration,\n in which case starts at the last item.\n\n iterable: any finite iterable\n\n >>> rc = ReversibleCycle(range(3))\n >>> next(rc)\n 0\n >>> next(rc)\n 1\n >>> 
rc.reverse()\n >>> next(rc)\n 0\n >>> next(rc)\n 2\n \"\"\"\n def __init__(self, iterable):\n self._items = list(iterable)\n self._pos = None\n self._reverse = False\n\n def __next__(self):\n if self.pos is None:\n self.pos = -1 if self._reverse else 0\n else:\n self.pos = self.pos + self._delta\n return self._items[self.pos]\n\n @property\n def _delta(self):\n return -1 if self._reverse else 1\n\n @property\n def pos(self):\n return self._pos\n\n @pos.setter\n def pos(self, value):\n self._pos = value % len(self._items)\n\n def reverse(self):\n \"\"\"\n Reverse the order of the iterable.\n \"\"\"\n self._reverse = not self._reverse\n\n\nclass AIUnoGame:\n def __init__(self, players):\n self.game = UnoGame(players)\n self.player = choice(self.game.players)\n self.player_index = self.game.players.index(self.player)\n print('The game begins. You are Player {}.'.format(self.player_index))\n self.print_hand()\n while self.game.is_active:\n print()\n next(self)\n\n def __next__(self):\n game = self.game\n player = game.current_player\n player_id = player.player_id\n current_card = game.current_card\n if player == self.player:\n print('Current card: {}, color: {}'.format(\n game.current_card, game.current_card._color\n ))\n self.print_hand()\n if player.can_play(current_card):\n played = False\n while not played:\n card_index = int(input('Which card do you want to play? '))\n card = player.hand[card_index]\n if not game.current_card.playable(card):\n print('Cannot play that card')\n else:\n if card.color == 'black':\n new_color = input('Which color do you want? ')\n else:\n new_color = None\n game.play(player_id, card_index, new_color)\n played = True\n else:\n print('You cannot play. You must pick up a card.')\n game.play(player_id, card=None)\n self.print_hand()\n elif player.can_play(game.current_card):\n for i, card in enumerate(player.hand):\n if game.current_card.playable(card):\n if card.color == 'black':\n new_color = choice(COLORS)\n else:\n new_color = None\n print(\"Player {} played {}\".format(player, card))\n game.play(player=player_id, card=i, new_color=new_color)\n break\n else:\n print(\"Player {} picked up\".format(player))\n game.play(player=player_id, card=None)\n\n def print_hand(self):\n print('Your hand: {}'.format(\n ' '.join(str(card) for card in self.player.hand)\n ))\n","repo_name":"bennuttall/uno","sub_path":"uno.py","file_name":"uno.py","file_ext":"py","file_size_in_byte":12124,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"81"} +{"seq_id":"2184642557","text":"#!/usr/bin/env python\n\nfrom urllib import request\nfrom urllib import error\nimport json\nfrom pacman_mirrors.constants import colors\nfrom pacman_mirrors.functions import printFn\nfrom pacman_mirrors.functions.util import strip_protocol\nfrom pacman_mirrors.functions.util import msg\nfrom datetime import datetime\nfrom pacman_mirrors.functions.jsonFn import read_json_file\n\nC_KO = colors.RED\nC_OK = colors.GREEN\nC_NONE = colors.RESET\n\n\ndef get_local_mirrors() -> tuple:\n urls = []\n with open(\"/etc/pacman.d/mirrorlist\", \"r\") as f_list:\n for line in f_list:\n if not line.startswith(\"Server\"):\n continue\n line = line.split(\"=\")[1].strip()\n line = line.split(\"$\")[0]\n mirror_url = line.split('/')\n mirror_url.pop()\n mirror_branch = mirror_url.pop()\n line = \"/\".join(mirror_url)\n urls.append(line + \"/\",)\n return mirror_branch, urls\n\n\ndef get_state(states: list, branch: str) -> tuple:\n ret_color = C_OK\n status_text = \"OK\"\n x = 
states[0]\n if branch == \"testing\":\n x = states[1]\n if branch == \"unstable\":\n x = states[2]\n if x == 0:\n ret_color = C_KO\n status_text = \"--\"\n return ret_color, status_text\n\n\ndef print_status(self) -> int:\n \"\"\"\n Printe mirror status\n :param self:\n :return:\n \"\"\"\n # If configuration urls are missing - show only first mirror from mirror pool\n if not self.config[\"url_mirrors_json\"] or not self.config[\"url_status_json\"]:\n color = C_OK\n text = \"OK\"\n now = datetime.now()\n fake = now.strftime(\"00:%M\")\n mirror = get_static_mirror(self.config[\"mirror_file\"])\n print(f\"Mirror #1\", color, f\"{text}\", C_NONE, f\"{fake} {mirror['country']} {mirror['url']}\")\n return 0\n\n system_branch, mirrors_pacman = get_local_mirrors()\n try:\n with request.urlopen('https://repo.manjaro.org/status.json') as f_url:\n req = f_url.read()\n except error.URLError:\n msg(\"Downloading status failed!\", color=colors.BLUE)\n msg(\"Please check you network connection ...\", color=colors.YELLOW)\n return 1 # return failure\n json_data = json.loads(req)\n mirrors = []\n for mirror in json_data:\n for protocol in mirror[\"protocols\"]:\n temp = mirror.copy()\n temp[\"url\"] = f\"{protocol}://{strip_protocol(temp['url'])}\"\n mirrors.append(temp)\n\n mirrors = [m for m in mirrors if m['url'] in mirrors_pacman]\n\n printFn.yellow_msg(f\"Local mirror status for {system_branch} branch\")\n exit_code = 0 # up-to-date\n for i, url in enumerate(mirrors_pacman): # same order as pacman-conf\n try:\n mirror = [m for m in mirrors if m['url'] == url][0]\n color, text = get_state(mirror[\"branches\"], system_branch)\n len_country = max(len(m['country']) for m in mirrors) + 1\n print(f\"Mirror #{str(i + 1):2}\", color, f\"{text}\", C_NONE,\n f\"{mirror['last_sync']:7} {mirror['country']:{len_country}} {mirror['url']}\")\n if i == 0 and color == C_KO:\n exit_code = 4 # first mirror not sync !\n except IndexError:\n print(C_KO, f\"Mirror #{i + 1:2}\", f\"{url} does not exist{C_NONE}\")\n exit_code = 5 # not found\n\n return exit_code\n\n\ndef get_static_mirror(filename: str) -> dict:\n \"\"\"\n Get first mirror from mirror pool\n :param filename:\n :return:\n \"\"\"\n mirror = read_json_file(filename)\n return mirror[0]\n","repo_name":"Kyuofox/Manjaro_aarch64_chroot","sub_path":"usr/lib/python3.9/site-packages/pacman_mirrors/functions/mirror_status.py","file_name":"mirror_status.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35504757956","text":"from requests_oauthlib import OAuth1Session\nfrom os import environ\nfrom json import loads\n\n\nUS_WOEID = 23424977\n\n\nclass TwitterClient(object):\n API = \"https://api.twitter.com\"\n TWEET_URL = f\"{API}/1.1/statuses/update.json\"\n TRENDS_URL = f\"{API}/1.1/trends/place.json\"\n SEARCH_URL = f\"{API}/1.1/search/tweets.json\"\n GEO_URL = f\"{API}/1.1/geo/id/{{}}.json\"\n TOKEN_URL = f\"{API}/oauth2/token\"\n OAUTH_TOKEN_URL = f\"{API}/oauth/access_token\"\n\n STREAM = \"https://stream.twitter.com\"\n FILTER_URL = f\"{STREAM}/1.1/statuses/filter.json\"\n SAMPLE_URL = f\"{STREAM}/1.1/statuses/sample.json\"\n\n def __init__(self):\n self.session = OAuth1Session(\n environ[\"TWITTER_API_KEY\"],\n client_secret=environ[\"TWITTER_API_SECRET\"],\n resource_owner_key=environ[\"TWITTER_TOKEN_ACCESS\"],\n resource_owner_secret=environ[\"TWITTER_TOKEN_SECRET\"],\n )\n\n def stream(self):\n with self.session.get(\n self.SAMPLE_URL,\n params=dict(\n 
filter_level=\"low\",\n language=\"en\",\n ),\n stream=True,\n ) as resp:\n for i in resp.iter_lines():\n tweet = loads(i)\n if \"delete\" in tweet:\n continue\n if \"retweeted_status\" in tweet:\n tweet = tweet[\"retweeted_status\"]\n if \"extended_tweet\" in tweet:\n yield tweet[\"extended_tweet\"][\"full_text\"]\n else:\n yield tweet[\"text\"]\n","repo_name":"dparker2/internet-trends","sub_path":"worker/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42629803990","text":"import fastf1\nfrom fastf1 import plotting\nimport os\nimport plotly.express as px\nimport pandas as pd\n\n\nteam_color_map = {\n \"Red Bull Racing\": \"blue\",\n \"Aston Martin\": \"green\",\n \"Ferrari\": \"red\",\n \"Mercedes\": \"mediumturquoise\",\n \"Alfa Romeo\": \"darkred\",\n \"Apline\": \"deeppink\",\n \"Williams\": \"lightblue\",\n \"AlphaTauri\": \"blueviolet\",\n \"Haas F1 Team\": \"White\",\n \"McLaren\": \"orange\",\n}\n\n\ndef get_laps():\n # lec_laps = race.laps.pick_driver('LEC')\n # lap = race.laps.pick_fastest()\n\n # [ 'Time', 'DriverNumber', 'LapTime', 'LapNumber', 'PitOutTime', 'PitInTime'\n # , 'Sector1Time', 'Sector2Time', 'Sector3Time', 'Sector1SessionTime', 'Sector2SessionTime', 'Sector3SessionTime'\n # , 'SpeedI1', 'SpeedI2', 'SpeedFL', 'SpeedST', 'IsPersonalBest', 'Compound', 'TyreLife', 'FreshTyre', 'Stint', 'LapStartTime', 'Team', 'Driver', 'TrackStatus', 'IsAccurate', 'LapStartDate']\n return race.laps\n\n\ndef get_pos_data():\n pos_data = race.pos_data\n print(pos_data)\n # ['Date', 'Status', 'X', 'Y', 'Z', 'Source', 'Time', 'SessionTime']\n print(pos_data['44'].keys())\n\n\ndef get_tel_data(lap):\n # ['Date', 'SessionTime', 'DriverAhead', 'DistanceToDriverAhead', 'Time', 'RPM', 'Speed', 'nGear', 'Throttle', 'Brake', 'DRS', 'Source', 'Distance', 'RelativeDistance', 'Status', 'X', 'Y', 'Z']\n return lap.get_telemetry()\n\n\ncache_dir = f'{os.getcwd()}\\cache'\n\nplotting.setup_mpl()\n\nfastf1.Cache.enable_cache('H:/projects/fastf1data/sandbox/cache')\n\nrace = fastf1.get_session(2023, 'Saudi Grand Prix', 'R')\nrace.load()\n\n\nlap_data = get_laps()\n\ndrivers = pd.DataFrame(columns=['driver', 'maxspeed', 'minspeed', 'lapnumber'])\n\nprint(lap_data.columns)\n\nfor index, row in lap_data.iterrows():\n print(f\"{index} / {len(lap_data.index)} ({row['Driver']})\")\n\n if row['Team'] == 'Ferrari':\n tel_data = get_tel_data(row)\n\n drivers = drivers.append({\n 'driver': row['Driver'],\n 'maxspeed': tel_data['Speed'].max(),\n 'minspeed': tel_data['Speed'].min(),\n 'lapnumber': row['LapNumber'],\n 'laptime': row['LapTime'].total_seconds(),\n 'team': row['Team'],\n }, ignore_index=True)\n\n # print(f\"{row['Driver']} - {row['LapNumber']} ({row['TyreLife']})\")\n # print(f\"maxspeed: \", tel_data['Speed'].max())\n # print(f\"minspeed: \", tel_data['Speed'].min())\n # print(f\"Throttle max: \", tel_data['Throttle'].max())\n # print(f\"Throttle min: \", tel_data['Throttle'].min())\n # print(f\"Throttle mean: \", tel_data['Throttle'].mean())\n\nprint(drivers)\n\n# maxspeed, laptime\n# fig = px.box(drivers, x=\"driver\", y=\"laptime\", boxmode=\"overlay\",\n# color=\"team\", color_discrete_map=team_color_map)\n# fig.show()\n\n\nfig = px.line(drivers, x=\"lapnumber\", y=\"laptime\",\n color='driver', title='Laptime')\nfig.show()\n\n\n# all_laps.groupby(['Col1'])[Col2].max()\n\n# print(tel_data)\n# print(len(tel_data.index))\n# 
print(tel_data.columns)\n\n\"\"\"\n[\n '_Session__fix_tyre_info',\n '_calculate_t0_date',\n '_car_data',\n '_check_lap_accuracy',\n '_drivers_from_f1_api',\n '_drivers_results_from_ergast',\n '_get_property_warn_not_loaded',\n '_laps',\n '_load_drivers_results',\n '_load_laps_data',\n '_load_race_control_messages',\n '_load_telemetry',\n '_load_weather_data',\n '_pos_data',\n '_race_control_messages',\n '_results',\n '_session_start_time',\n '_session_status',\n '_t0_date',\n '_weather_data',\n 'api_path',\n 'car_data',\n 'date',\n 'drivers',\n 'event',\n 'f1_api_support',\n 'get_driver',\n 'laps',\n 'load',\n 'load_laps',\n 'load_telemetry',\n 'name',\n 'pos_data',\n 'race_control_messages',\n 'results',\n 'session_start_time',\n 'session_status',\n 't0_date',\n 'weather_data',\n 'weekend'\n\"\"\"\n","repo_name":"jkbngl/fastf1data","sub_path":"sandbox/gettingstarted.py","file_name":"gettingstarted.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6725109692","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nc=input(\"Number of profile: \")\nc=int(c)\n\nfor i in range(c):\n i=i+1\n a = input(f\"Amplitude {i} : \")\n w = input(f\"Frequency {i} : \")\n a=int(a)\n w=int(w)\n x= np.arange(0,2*np.pi,0.001)\n y=a*np.sin(w*x)\n\n plt.plot(x,y)\nplt.ylabel('Amplitude')\nplt.xlabel('X values from 0 to 2pi')\nplt.title('Plot of sine from 0 to 2pi')\nplt.legend([\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\"])\nplt.show()\n\n","repo_name":"kocaksey/Sine_","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11187376020","text":"from pathlib import Path\nfrom typing import NamedTuple\n\nclass Instruction(NamedTuple):\n direction: str\n amount: int\n\nclass Waypoint(object):\n\n def __init__(self, east, south, west, north):\n self.east = east\n self.south = south\n self.west = west\n self.north = north\n\n def rotate(self, direction: str, amount: int):\n if direction == \"L\":\n while amount:\n self.east, self.south, self.west, self.north = self.south, self.west, self.north, self.east\n amount -= 90\n else:\n while amount:\n self.east, self.south, self.west, self.north = self.north, self.east, self.south, self.west\n amount -= 90\n \n def update(self, direction: str, amount: int):\n if direction == \"E\":\n self.east += amount\n elif direction == \"S\":\n self.south += amount\n elif direction == \"W\":\n self.west += amount\n elif direction == \"N\":\n self.north += amount\n\n def __str__(self) -> str:\n return f\"East: {self.east} South: {self.south} West: {self.west} North: {self.north}\"\n\nclass Ship(object):\n\n def __init__(self, east: int, south: int, west: int, north: int, direction: int):\n self.east = east\n self.south = south\n self.west = west\n self.north = north\n self.direction = direction\n\n def move(self, direction: str, amount: int) -> None:\n if direction == \"E\":\n self.east += amount\n elif direction == \"S\":\n self.south += amount\n elif direction == \"W\":\n self.west += amount\n elif direction == \"N\":\n self.north += amount\n \n def move_to_waypoint(self, waypoint: Waypoint):\n self.east += waypoint.east\n self.south += waypoint.south\n self.west += waypoint.west\n self.north += waypoint.north\n \n def manhattan(self) -> int:\n return 
abs(self.north-self.south)+abs(self.east-self.west)\n \n def change_direction(self, direction: str, amount: int):\n if direction == \"R\":\n self.direction = (self.direction + amount) % 360\n elif direction == \"L\":\n self.direction = (self.direction - amount) % 360\n\n def __str__(self) -> str:\n return f\"East: {self.east} South: {self.south} West: {self.west} North: {self.north}\"\n\ndef get_input() -> list[Instruction]:\n input_path = Path(__file__).parent / \"input.txt\"\n data = [Instruction(chr(line[0]), int(line[1:].strip())) for line in input_path.open(mode=\"rb\").readlines()]\n return data\n\ndef part1(instructions: list[Instruction]) -> int:\n ship = Ship(0, 0, 0, 0, 0)\n directions = {0: \"E\", 90: \"S\", 180: \"W\", 270: \"N\"}\n for instruction in instructions:\n if instruction.direction == \"R\" or instruction.direction == \"L\":\n ship.change_direction(instruction.direction, instruction.amount)\n elif instruction.direction == \"F\":\n direction = directions[ship.direction]\n ship.move(direction, instruction.amount)\n else:\n ship.move(instruction.direction, instruction.amount)\n return ship.manhattan()\n\ndef part2(instructions: list[Instruction]) -> int:\n ship = Ship(0, 0, 0, 0, 0)\n waypoint = Waypoint(10, 0, 0, 1)\n for instruction in instructions:\n if instruction.direction == \"R\" or instruction.direction == \"L\":\n waypoint.rotate(instruction.direction, instruction.amount)\n elif instruction.direction == \"F\":\n for _ in range(instruction.amount):\n ship.move_to_waypoint(waypoint)\n else:\n waypoint.update(instruction.direction, instruction.amount)\n return ship.manhattan()\n\n\ndata = get_input()\nprint(f\"Part 1: {part1(data)}\")\nprint(f\"Part 2: {part2(data)}\")\n","repo_name":"nicokroe/Advent-of-Code-2020","sub_path":"day_12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19021352550","text":"import csv\nimport datetime\nimport random\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\nfrom faker import Faker\n\nFaker.seed(42)\nfake = Faker(locale=\"en_US\")\n\ndf = pd.read_csv(\"scraped_workouts.csv\")\ngby = df.groupby([\"Fitness Discipline\", \"Type\", \"Length (minutes)\", \"Title\"])\n\n\ndef generate_positive_number(mean: float, sigma: float, round_to: int = 2) -> float:\n \"\"\"\n Generates a random float value with a normal distribution. If the\n number is not positive, it will be generated again.\n\n Args:\n mean (Union[float,int]): The mean of the normal distribution.\n sigma (Union[float, int]): The standard deviation of the normal distribution.\n round_to (Union[float, int]): The number of decimal places to round the number to. 
Defaults to 2.\n\n Returns:\n float: A positive number of round_to decimal places.\n \"\"\"\n x = round(np.random.normal(mean, sigma), round_to)\n return x if x > 0 else generate_positive_number(mean, sigma, round_to)\n\n\ndef generate_value(\n mean: float, sigma: float, discipline: str, column: str, round_to: int = 2\n) -> Union[float, None]:\n \"\"\"\n Generates a random value with fixed mean and sigma for a given column.\n Returns None if the column key does not make sense for the type of discipline.\n Otherwise, returns the value with the given round_to decimal places.\n\n Args:\n mean (float): The mean of the normal distribution.\n sigma (float): The standard deviation of the normal distribution.\n discipline (str): The fitness discipline of the current row.\n round_to (int, optional): The number of digits to round the generated number to. Defaults to 2.\n column (_type_, optional): The column header to get random value for. Defaults to None.\n\n Returns:\n Union[float, None]: _description_\n \"\"\"\n if discipline != \"Cycling\" and column in {\n \"watts\",\n \"resistance\",\n \"cadence\",\n \"speed\",\n \"distance\",\n \"calories\",\n \"heart_rate\",\n }:\n return None\n if discipline == \"Running\" and column == \"heart_rate\":\n return None\n if discipline == \"Meditation\" or discipline == \"Running\" and column == \"output\":\n return None\n if discipline == \"Cycling\" and column == \"resistance\":\n return round(random.random() * 100, round_to)\n switcher = {\n \"output\": generate_positive_number(mean, sigma, round_to),\n \"resistance\": None,\n \"watts\": generate_positive_number(100, 30),\n \"cadence\": generate_positive_number(80, 13, 0),\n \"speed\": generate_positive_number(18, 3.40, 0),\n \"distance\": generate_positive_number(8, 5.02),\n \"calories\": generate_positive_number(200, 180, 0),\n \"heart_rate\": generate_positive_number(140, 20),\n }\n return switcher.get(column)\n\n\ndef generate_workout_metadata(row):\n \"\"\"\n Generates a workout metadata dictionary.\n\n Args:\n row (DataFrame): A row from the scraped_workouts.csv file.\n\n Returns:\n dict: A workout metadata dictionary.\n \"\"\"\n instructor_name = row[\"Instructor Name\"].values[0]\n length = row[\"Length (minutes)\"].values[0]\n fitness_discipline = row[\"Fitness Discipline\"].values[0]\n workout_type = row[\"Type\"].values[0]\n title = row[\"Title\"].values[0]\n return [instructor_name, length, fitness_discipline, workout_type, title]\n\n\ndef write_headers(writer: csv.writer):\n \"\"\"\n Writes the headers for the csv file.\n\n Args:\n writer (csv.writer): Headers for the csv file.\n \"\"\"\n writer.writerow(\n [\n \"Workout Timestamp\",\n \"Live/On-Demand\",\n \"Instructor Name\",\n \"Length (minutes)\",\n \"Fitness Discipline\",\n \"Type\",\n \"Title\",\n \"Class Timestamp\",\n \"Total Output\",\n \"Avg. Watts\",\n \"Avg. Resistance\",\n \"Avg. Cadence (RPM)\",\n \"Avg. Speed (mph)\",\n \"Distance (mi)\",\n \"Calories Burned\",\n \"Avg. Heartrate\",\n \"Avg. Incline\",\n \"Avg. 
Pace (min/mi)\",\n ]\n )\n\n\ndef generate_workout_metrics(fitness_discipline):\n \"\"\"\n Returns a dict of key value pairs for the workout metrics for a given fitness discipline.\n\n Args:\n fitness_discipline (str): The fitness discipline of the current row.\n\n Returns:\n dict: A dict of key value pairs for the workout metrics.\n \"\"\"\n total_output = generate_value(150.5, 10.80, fitness_discipline, \"output\", 2)\n avg_watts = generate_value(120, 35, fitness_discipline, \"watts\", 2)\n avg_resistance = generate_value(None, None, fitness_discipline, \"resistance\", 2)\n avg_cadence = generate_value(80, 15, fitness_discipline, \"cadence\", 0)\n avg_speed = generate_value(15.59, 2.10, fitness_discipline, \"speed\", 2)\n distance = generate_value(6.79, 5.89, fitness_discipline, \"distance\", 2)\n calories = generate_value(196.97, 178.52, fitness_discipline, \"calories\", 0)\n heart_rate = generate_value(149, 22.89, fitness_discipline, \"heart_rate\", 2)\n return [total_output, avg_watts, avg_resistance, avg_cadence, avg_speed, distance, calories, heart_rate]\n\n\ndef generate_csv() -> None:\n \"\"\"\n Uses sample data from the scraped_workouts.csv file to generate a csv file with Peloton workouts.\n \"\"\"\n with open(\"workouts.csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n write_headers(writer)\n for _ in range(random.randint(100, 1000)):\n sample_row = df.sample()\n workout_timestamp = fake.date_time_between_dates(\n datetime_start=datetime.datetime(2021, 1, 1), datetime_end=\"now\"\n )\n workout_live_on_demand = \"Live\" if fake.boolean() else \"On Demand\"\n workout_metadata = generate_workout_metadata(sample_row)\n class_timestamp = fake.date_time_between_dates(\n datetime_start=datetime.datetime(2021, 1, 1), datetime_end=\"now\"\n )\n workout_metrics = generate_workout_metrics(workout_metadata[2])\n row = [workout_timestamp,workout_live_on_demand] + workout_metadata + [class_timestamp] + workout_metrics\n row.extend([None, None])\n writer.writerow(row)\n\n\nif __name__ == \"__main__\":\n generate_csv()\n","repo_name":"jramirez857/PelotonDashboard","sub_path":"csv_generator.py","file_name":"csv_generator.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"33603627350","text":"# hello.py\n# from mpi4py import MPI\n# comm = MPI.COMM_WORLD\n# rank = comm.Get_rank()\n# size = comm.Get_size()\n# print \"hello world from process \", rank+1, \" of \", size\n\nfrom mutex import Mutex\nfrom conditionvariable import ConditionVariable\nfrom monitor import Monitor\nimport time\n\nm = Mutex(1)\ncv = ConditionVariable(1)\nmonitor = Monitor() # nowe watki tworzone sa w monitorze\nif monitor.communicationManager.processId > 0: # te procesy symuluja czekanie\n monitor.lock(m)\n monitor.wait(cv, m)\n monitor.log(\"INFO\", \"Stopped waiting, going to sleep\")\n time.sleep(1)\n monitor.signal(cv)\n monitor.unlock(m)\nelse:\n time.sleep(2)\n monitor.lock(m)\n monitor.signal(cv)\n monitor.unlock(m)\nmonitor.finalize()\n","repo_name":"chris710/distributedmonitor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38366982594","text":"#!/usr/bin/env python\n\n# ========================================================================== #\n#\n# Create links to the parameter files in the data tree template\n#\n# 
========================================================================== #\n\nfrom __future__ import print_function\nimport os\nimport sys\nfrom optparse import OptionParser\nimport subprocess\n\ndef main():\n\n global options\n\n # parse the command line\n\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option('--debug',\n dest='debug', default=False,\n action=\"store_true\",\n help='Set debugging on')\n parser.add_option('--verbose',\n dest='verbose', default=False,\n action=\"store_true\",\n help='Set verbose debugging on')\n parser.add_option('--templateDir',\n dest='templateDir', default=\"/tmp/templateDir\",\n help='Path of template - i.e. the directory tree template')\n parser.add_option('--installDir',\n dest='installDir', default=\"/tmp/installDir\",\n help='Where the tree will be installed')\n (options, args) = parser.parse_args()\n \n if (options.verbose):\n options.debug = True\n\n # debug print\n\n if (options.debug):\n print(\"Running script: \", os.path.basename(__file__), file=sys.stderr)\n print(\" Options:\", file=sys.stderr)\n print(\" Debug: \", options.debug, file=sys.stderr)\n print(\" Verbose: \", options.verbose, file=sys.stderr)\n print(\" Template dir: \", options.templateDir, file=sys.stderr)\n print(\" Install dir: \", options.installDir, file=sys.stderr)\n\n # make the install dir\n\n try:\n os.makedirs(options.installDir)\n except OSError as exc:\n if (options.verbose):\n print(\"WARNING: trying to create install dir\", file=sys.stderr)\n print(\" \", exc, file=sys.stderr)\n\n # Walk the template directory tree\n\n for dirPath, subDirList, fileList in os.walk(options.templateDir):\n for fileName in fileList:\n if (fileName[0] == '_'):\n handleParamFile(dirPath, fileName)\n\n sys.exit(0)\n\n########################################################################\n# Handle a parameter file entry\n\ndef handleParamFile(dirPath, paramFileName):\n\n if (options.debug):\n print(\"Handling param file, dir, paramFile: \", \\\n dirPath, \", \", paramFileName, file=sys.stderr)\n\n # compute sub dir\n\n subDir = dirPath[len(options.templateDir):]\n\n # compute install sub dir\n\n installSubDir = options.installDir + subDir\n\n if (options.debug):\n print(\"subDir: \", subDir, file=sys.stderr)\n print(\"installSubDir: \", installSubDir, file=sys.stderr)\n\n # make the install sub dir and go there\n\n try:\n os.makedirs(installSubDir)\n except OSError as exc:\n pass\n\n if (options.debug):\n print(\"os.chdir: \", installSubDir, file=sys.stderr)\n os.chdir(installSubDir)\n\n # remove the link if it exists\n\n if (os.path.exists(paramFileName)):\n os.remove(paramFileName)\n\n # create the link\n\n paramFilePath = os.path.join(options.templateDir + subDir, paramFileName)\n cmd = \"ln -s \" + paramFilePath\n runCommand(cmd)\n\n return\n\n########################################################################\n# Run a command in a shell, wait for it to complete\n\ndef runCommand(cmd):\n\n if (options.verbose == True):\n print(\"running cmd:\",cmd, file=sys.stderr)\n \n try:\n retcode = subprocess.call(cmd, shell=True)\n if retcode < 0:\n print(\"Child was terminated by signal: \", -retcode, file=sys.stderr)\n else:\n if (options.verbose == True):\n print(\"Child returned code: \", retcode, file=sys.stderr)\n except OSError as e:\n print(\"Execution failed:\", e, file=sys.stderr)\n\n########################################################################\n# Run - entry point\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"NCAR/lrose-projects-precip","sub_path":"projDir/system/scripts/createParamLinks.py","file_name":"createParamLinks.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"24503879386","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\nimport cv2\nimport time\n\nPr = 1\n\n\ndef get_cost_processing(used_time):\n cost = 10 * used_time * Pr\n print(f\" >> [COST] Processing cost = {cost*1000} mW\")\n return cost\n\n\ndef mean_absolute_error(gt_array, es_array):\n gt_average = np.mean(gt_array)\n es_average = np.mean(es_array)\n mae = np.abs(gt_average - es_average)\n return mae\n\n\ndef plot_temperature_array(input_array):\n average_temp = np.ones(len(input_array)) * np.mean(input_array)\n plt.plot(input_array, \"b\", label=\"Temperatures\")\n plt.plot(average_temp, \"r\", label=\"Temp. Average\")\n plt.title(f\"Temperature sampled during {len(input_array)} seconds\")\n plt.legend()\n plt.xlabel(\"Time (seconds)\")\n plt.ylabel(\"Temperature (degrees Celsius)\")\n plt.grid(True, which='major', linestyle='-')\n plt.grid(True, which='minor', linestyle='--')\n # plt.yticks(np.arange(17, 25, 0.1))\n\n plt.show()\n\n\ndef regulation_temp(input_temp):\n if input_temp < 18:\n input_temp = 18\n if input_temp > 24:\n input_temp = 24\n return input_temp\n\n\ndef main():\n print(\"Simulation\")\n\n cv2.namedWindow(\"Temperature\", cv2.WINDOW_NORMAL)\n temperature = 22\n temperatures_array = []\n max_samples = 60\n number_samples = 0\n used_time = 0\n while number_samples < max_samples:\n variance = np.random.uniform(-1, 1)\n temperature += variance\n temperature = regulation_temp(temperature)\n\n ####################\n # PROCESS #\n ####################\n t0 = time.time()\n temperatures_array.append(temperature)\n used_time += time.time() - t0\n\n image = np.zeros((500, 500, 3), np.uint8)\n cv2.putText(image, f\"{temperature:.2f} C\", (160, 260), cv2.FONT_HERSHEY_SIMPLEX, 1.6, (255, 255, 255), 3, cv2.LINE_AA)\n cv2.imshow(\"Temperature\", image)\n k = cv2.waitKey(1000)\n # k = cv2.waitKey(0)\n if k == 27:\n break\n number_samples += 1\n\n cost_processing = get_cost_processing(used_time)\n plot_temperature_array(temperatures_array)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Arritmic/SmartIoTSensing","sub_path":"scripts/temperature_simulation.py","file_name":"temperature_simulation.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29747049845","text":"import pytest\nimport responses\nimport re\nfrom flask import json\nfrom requests import ConnectionError\n\nfrom backend.util.response.store.product_results import ProductResultsSchema, ProductResultsResponse\nfrom backend.util.response.error import ErrorSchema\n\n\n@pytest.fixture(scope=\"module\")\ndef response_json():\n return {\n \"id\": \"string\",\n \"name\": \"string\",\n \"kind\": \"string\",\n \"brand\": \"string\",\n \"details\": [\n \"string\"\n ],\n \"care\": \"string\",\n \"about\": \"string\",\n \"images\": [\n \"string\"\n ],\n \"gender\": \"string\",\n \"price\": {\n \"outlet\": 10.55,\n \"retail\": 20.9,\n \"symbol\": \"£\"\n }\n }\n\n\ndef test_product_controller(flask_app, willstores_ws, response_json):\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, re.compile(willstores_ws),\n status=200,\n json=response_json\n )\n\n with 
flask_app.test_client() as client:\n response = client.get(\n \"api/store/product/id\"\n )\n\n data = json.loads(response.data)\n ProductResultsSchema().load(data)\n assert response.status_code == 200\n\n\n@pytest.mark.parametrize(\n \"method,http_method,test_url,error,status_code\",\n [\n (\"marshall_json\", \"GET\", \"api/store/product/id\", ConnectionError(), 502),\n (\"marshall_json\", \"GET\", \"api/store/product/id\", Exception(), 500)\n ]\n)\ndef test_product_controller_error(mocker, willstores_ws, get_request_function, response_json, method, http_method, test_url, error, status_code):\n mocker.patch.object(ProductResultsResponse, method, side_effect=error)\n\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, re.compile(willstores_ws),\n status=200,\n json=response_json\n )\n\n make_request = get_request_function(http_method)\n\n response = make_request(\n test_url\n )\n\n data = json.loads(response.data)\n ErrorSchema().load(data)\n\n assert response.status_code == status_code\n\n\n@pytest.mark.parametrize(\n \"test_url, status_code\",\n [\n (\"api/store/product/id\", 400),\n (\"api/store/product/id\", 401),\n (\"api/store/product/id\", 404),\n (\"api/store/product/id\", 500),\n (\"api/store/product/id\", 502),\n (\"api/store/product/id\", 504)\n ]\n)\ndef test_product_controller_http_error(flask_app, willstores_ws, json_error_recv, test_url, status_code):\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, re.compile(willstores_ws),\n status=status_code,\n json=json_error_recv\n )\n\n with flask_app.test_client() as client:\n response = client.get(\n test_url\n )\n\n data = json.loads(response.data)\n ErrorSchema().load(data)\n\n assert response.status_code == status_code\n","repo_name":"willrp/willbuyer","sub_path":"backend/tests/unit/controller/api/store/product/test_product_controller.py","file_name":"test_product_controller.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21992433521","text":"\ndef anagrams(word, words):\n sortedSample = sorted(word) \n anaList = [ an for an in words if sorted(an) == sortedSample ]\n return anaList\n \n\n\nprint(anagrams('abba', ['aabb', 'abcd', 'bbaa', 'dada']))\nprint(anagrams('racer', ['crazer', 'carer', 'racar', 'caers', 'racer']))\n","repo_name":"sashaPost/codewars_katas","sub_path":"python_codewars/5_kyu/anagrams.py","file_name":"anagrams.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41262116537","text":"# -*- coding: utf-8 -*-\n# @Author: WuLC\n# @Date: 2016-09-28 22:46:52\n# @Last modified by: WuLC\n# @Last Modified time: 2016-09-28 23:00:41\n# @Email: liangchaowu5@gmail.com\n\n# Trie, HashTable\nclass Solution(object):\n def palindromePairs(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: List[List[int]]\n \"\"\"\n result = []\n # build Trie\n root = {}\n for i in xrange(len(words)):\n curr = root\n for char in words[i][::-1]:\n curr.setdefault(char, {})\n curr = curr[char]\n curr['#'] = i # represents a word\n \n # search\n for i in xrange(len(words)):\n # ignore empty string, cause the following code has dealt with it\n if len(words[i]) == 0:\n continue\n\n \n curr, match = root, True\n for j in xrange(len(words[i])):\n if '#' in curr and self.is_palindrome(words[i][j:]) and curr['#'] != i:\n result.append([i, curr['#']])\n if curr == root: # empty string matches from both sides\n 
result.append([curr['#'], i])\n if words[i][j] in curr:\n curr = curr[words[i][j]]\n else:\n match = False\n break\n if match:\n for index in self.dfs(curr, ''):\n if index != i:\n result.append([i,index])\n return result\n \n # find all the palindrome string under root and return its' indices\n def dfs(self, root, tmp):\n if len(root) == 1 and '#' in root:\n return [root['#']] if self.is_palindrome(tmp) else []\n indices = []\n if '#' in root and self.is_palindrome(tmp):\n indices.append(root['#'])\n for k,v in root.items():\n if k != '#':\n indices += self.dfs(v, tmp+k)\n return indices\n \n def is_palindrome(self, s):\n i, j = 0, len(s) - 1 \n while i < j:\n if s[i] != s[j]:\n return False\n i += 1\n j -= 1\n return True","repo_name":"WuLC/LeetCode","sub_path":"Algorithm/Python/336. Palindrome Pairs.py","file_name":"336. Palindrome Pairs.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"35483259470","text":"import os\nimport sys\nimport glob\nimport difflib\nimport hashlib\nimport datetime\n\nfrom collections import defaultdict\n\nimport yara\n\nfrom lxml import etree as ET\n\nfrom flask import render_template, g\n\nfrom cabarchive import CabArchive, CabFile\nfrom jcat import JcatFile, JcatBlobSha1, JcatBlobSha256, JcatBlobKind\n\nfrom lvfs import app, db, ploader\nfrom lvfs.dbutils import _execute_count_star\nfrom lvfs.emails import send_email\nfrom lvfs.firmware.utils import _firmware_delete\nfrom lvfs.models import Remote, Firmware, Vendor, Client, AnalyticVendor, User, YaraQuery, YaraQueryResult\nfrom lvfs.models import AnalyticFirmware, Useragent, UseragentKind, Analytic, Report, Metric\nfrom lvfs.models import ComponentShard, ComponentShardInfo, Test, Component, Category, Protocol, FirmwareEvent\nfrom lvfs.models import _get_datestr_from_datetime\nfrom lvfs.metadata.utils import _metadata_update_targets, _metadata_update_pulp, _generate_metadata_mds\nfrom lvfs.util import _event_log, _get_shard_path, _get_absolute_path\nfrom lvfs.upload.uploadedfile import UploadedFile, MetadataInvalid\n\ndef _regenerate_and_sign_metadata(only_embargo=False):\n\n # get list of dirty remotes\n remotes = []\n for r in db.session.query(Remote):\n if not r.is_signed:\n continue\n # fix up any remotes that are not dirty, but have firmware that is dirty\n # -- which shouldn't happen, but did...\n if not r.is_dirty:\n for fw in r.fws:\n if not fw.is_dirty:\n continue\n print('Marking remote %s as dirty due to %u' % (r.name, fw.firmware_id))\n r.is_dirty = True\n if r.is_dirty:\n if r.is_public and only_embargo:\n continue\n remotes.append(r)\n\n # nothing to do\n if not remotes:\n return\n\n # set destination path from app config\n download_dir = app.config['DOWNLOAD_DIR']\n if not os.path.exists(download_dir):\n os.mkdir(download_dir)\n\n # update everything required\n invalid_fns = []\n for r in remotes:\n print('Updating: %s' % r.name)\n for r, blob_xmlgz in _metadata_update_targets(remotes):\n\n # write metadata-?????.xml.gz\n fn_xmlgz = os.path.join(download_dir, r.filename)\n with open(fn_xmlgz, 'wb') as f:\n f.write(blob_xmlgz)\n invalid_fns.append(fn_xmlgz)\n\n # write metadata.xml.gz\n fn_xmlgz = os.path.join(download_dir, r.filename_newest)\n with open(fn_xmlgz, 'wb') as f:\n f.write(blob_xmlgz)\n invalid_fns.append(fn_xmlgz)\n\n # create Jcat item with SHA256 checksum blob\n jcatfile = JcatFile()\n jcatitem = jcatfile.get_item(r.filename)\n jcatitem.add_alias_id(r.filename_newest)\n 
jcatitem.add_blob(JcatBlobSha1(blob_xmlgz))\n jcatitem.add_blob(JcatBlobSha256(blob_xmlgz))\n\n # write each signed file\n for blob in ploader.metadata_sign(blob_xmlgz):\n\n # add GPG only to archive for backwards compat with older fwupd\n if blob.kind == JcatBlobKind.GPG:\n fn_xmlgz_asc = fn_xmlgz + '.' + blob.filename_ext\n with open(fn_xmlgz_asc, 'wb') as f:\n f.write(blob.data)\n invalid_fns.append(fn_xmlgz_asc)\n\n # add to Jcat file too\n jcatitem.add_blob(blob)\n\n # write jcat file\n fn_xmlgz_jcat = fn_xmlgz + '.jcat'\n with open(fn_xmlgz_jcat, 'wb') as f:\n f.write(jcatfile.save())\n invalid_fns.append(fn_xmlgz_jcat)\n\n # update PULP\n for r in remotes:\n if r.name == 'stable':\n _metadata_update_pulp(download_dir)\n\n # do this all at once right at the end of all the I/O\n for fn in invalid_fns:\n print('Invalidating {}'.format(fn))\n ploader.file_modified(fn)\n\n # mark as no longer dirty\n for r in remotes:\n if not r.build_cnt:\n r.build_cnt = 0\n r.build_cnt += 1\n r.is_dirty = False\n db.session.commit()\n\n # drop caches in other sessions\n db.session.expire_all()\n\n # log what we did\n for r in remotes:\n _event_log('Signed metadata {} build {}'.format(r.name, r.build_cnt))\n\n # only keep the last 6 metadata builds (24h / stable refresh every 4h)\n for r in remotes:\n if not r.filename:\n continue\n suffix = r.filename.split('-')[2]\n fns = glob.glob(os.path.join(download_dir, 'firmware-*-{}'.format(suffix)))\n for fn in sorted(fns):\n build_cnt = int(fn.split('-')[1])\n if build_cnt + 6 > r.build_cnt:\n continue\n os.remove(fn)\n _event_log('Deleted metadata {} build {}'.format(r.name, build_cnt))\n\ndef _show_diff(blob_old, blob_new):\n fromlines = blob_old.decode().replace('\\r', '').split('\\n')\n tolines = blob_new.decode().split('\\n')\n diff = difflib.unified_diff(fromlines, tolines)\n print('\\n'.join(list(diff)[3:]))\n\ndef _sign_fw(fw):\n\n # load the .cab file\n download_dir = app.config['DOWNLOAD_DIR']\n fn = os.path.join(download_dir, fw.filename)\n try:\n with open(fn, 'rb') as f:\n cabarchive = CabArchive(f.read())\n except IOError as e:\n raise NotImplementedError('cannot read %s: %s' % (fn, str(e)))\n\n # create Jcat file\n jcatfile = JcatFile()\n\n # sign each component in the archive\n print('Signing: %s' % fn)\n for md in fw.mds:\n try:\n\n # create Jcat item with SHA1 and SHA256 checksum blob\n cabfile = cabarchive[md.filename_contents]\n jcatitem = jcatfile.get_item(md.filename_contents)\n jcatitem.add_blob(JcatBlobSha1(cabfile.buf))\n jcatitem.add_blob(JcatBlobSha256(cabfile.buf))\n\n # sign using plugins\n for blob in ploader.archive_sign(cabfile.buf):\n\n # add GPG only to archive for backwards compat with older fwupd\n if blob.kind == JcatBlobKind.GPG:\n fn_blob = md.filename_contents + '.' 
+ blob.filename_ext\n cabarchive[fn_blob] = CabFile(blob.data)\n\n # add to Jcat file too\n jcatitem.add_blob(blob)\n\n except KeyError as _:\n raise NotImplementedError('no {} firmware found'.format(md.filename_contents))\n\n # rewrite the metainfo.xml file to reflect latest changes and sign it\n for md in fw.mds:\n\n # write new metainfo.xml file\n component = _generate_metadata_mds([md], metainfo=True)\n blob_xml = b'\\n' + \\\n ET.tostring(component,\n encoding='UTF-8',\n xml_declaration=False,\n pretty_print=True)\n _show_diff(cabarchive[md.filename_xml].buf, blob_xml)\n cabarchive[md.filename_xml].buf = blob_xml\n\n # sign it\n jcatitem = jcatfile.get_item(md.filename_xml)\n jcatitem.add_blob(JcatBlobSha1(blob_xml))\n jcatitem.add_blob(JcatBlobSha256(blob_xml))\n for blob in ploader.archive_sign(blob_xml):\n jcatitem.add_blob(blob)\n\n # write jcat file\n if jcatfile.items:\n cabarchive['firmware.jcat'] = CabFile(jcatfile.save())\n\n # overwrite old file\n cab_data = cabarchive.save()\n with open(fn, 'wb') as f:\n f.write(cab_data)\n\n # inform the plugin loader\n ploader.file_modified(fn)\n\n # update the download size\n for md in fw.mds:\n md.release_download_size = len(cab_data)\n\n # update the database\n fw.checksum_signed_sha1 = hashlib.sha1(cab_data).hexdigest()\n fw.checksum_signed_sha256 = hashlib.sha256(cab_data).hexdigest()\n fw.signed_timestamp = datetime.datetime.utcnow()\n db.session.commit()\n\ndef _repair_ts():\n\n # fix any timestamps that are incorrect\n for md in db.session.query(Component).filter(Component.release_timestamp < 1980):\n fn = _get_absolute_path(md.fw)\n if not os.path.exists(fn):\n continue\n print(fn, md.release_timestamp)\n try:\n ufile = UploadedFile(is_strict=False)\n for cat in db.session.query(Category):\n ufile.category_map[cat.value] = cat.category_id\n for pro in db.session.query(Protocol):\n ufile.protocol_map[pro.value] = pro.protocol_id\n with open(fn, 'rb') as f:\n ufile.parse(os.path.basename(fn), f.read())\n except MetadataInvalid as e:\n print('failed to parse file: {}'.format(str(e)))\n continue\n for md_local in ufile.fw.mds:\n if md_local.appstream_id == md.appstream_id:\n print('repairing timestamp from {} to {}'.format(md.release_timestamp,\n md_local.release_timestamp))\n md.release_timestamp = md_local.release_timestamp\n md.fw.mark_dirty()\n\n # all done\n db.session.commit()\n\ndef _fsck():\n for fw in db.session.query(Firmware):\n fn = _get_absolute_path(fw)\n if not os.path.isfile(fn):\n print('firmware {} is missing, expected {}'.format(fw.firmware_id, fn))\n\ndef _repair_csum():\n\n # fix all the checksums and file sizes\n for firmware_id in db.session.query(Firmware.firmware_id)\\\n .order_by(Firmware.firmware_id.asc()):\n fw = db.session.query(Firmware)\\\n .filter(Firmware.firmware_id == firmware_id)\\\n .one()\n try:\n print('checking {}'.format(fw.filename_absolute))\n with open(fw.filename_absolute, 'rb') as f:\n checksum_signed_sha1 = hashlib.sha1(f.read()).hexdigest()\n if checksum_signed_sha1 != fw.checksum_signed_sha1:\n print('repairing checksum from {} to {}'.format(fw.checksum_signed_sha1,\n checksum_signed_sha1))\n fw.checksum_signed_sha1 = checksum_signed_sha1\n fw.mark_dirty()\n checksum_signed_sha256 = hashlib.sha256(f.read()).hexdigest()\n if checksum_signed_sha256 != fw.checksum_signed_sha256:\n print('repairing checksum from {} to {}'.format(fw.checksum_signed_sha256,\n checksum_signed_sha256))\n fw.checksum_signed_sha256 = checksum_signed_sha256\n fw.mark_dirty()\n for md in fw.mds:\n sz = 
os.path.getsize(fw.filename_absolute)\n if sz != md.release_download_size:\n print('repairing size from {} to {}'.format(md.release_download_size, sz))\n md.release_download_size = sz\n md.fw.mark_dirty()\n except FileNotFoundError as _:\n pass\n\n # all done\n db.session.commit()\n\ndef _regenerate_and_sign_firmware():\n\n # find all unsigned firmware\n fws = db.session.query(Firmware).\\\n filter(Firmware.signed_timestamp == None).all()\n if not fws:\n return\n\n # sign each firmware in each file\n for fw in fws:\n if fw.is_deleted:\n continue\n print('Signing firmware %u...' % fw.firmware_id)\n _sign_fw(fw)\n _event_log('Signed firmware %s' % fw.firmware_id)\n\n # drop caches in other sessions\n db.session.expire_all()\n\ndef _ensure_tests():\n\n # ensure the test has been added for the firmware type\n for fw in db.session.query(Firmware).order_by(Firmware.timestamp):\n if not fw.is_deleted:\n ploader.ensure_test_for_fw(fw)\n db.session.commit()\n\ndef _delete_embargo_obsoleted_fw():\n\n # all embargoed firmware\n emails = defaultdict(list)\n for fw in db.session.query(Firmware)\\\n .join(Remote)\\\n .filter(Remote.name.startswith('embargo'))\\\n .order_by(Firmware.timestamp.asc()):\n\n # less than 6 months old\n if fw.target_duration < datetime.timedelta(days=30*6):\n continue\n\n # check that all the components are available with new versions\n all_newer = True\n print(fw.target_duration, fw.remote.name, fw.version_display)\n for md in fw.mds:\n md_newest = None\n for md_new in db.session.query(Component)\\\n .join(Firmware)\\\n .join(Remote)\\\n .filter(Remote.is_public)\\\n .filter(Component.appstream_id == md.appstream_id)\\\n .order_by(Firmware.timestamp.asc()):\n if md_new > md or (md_newest and md_new > md_newest):\n md_newest = md_new\n break\n if not md_newest:\n all_newer = False\n print('no newer version of {} {}'.format(md.appstream_id,\n md.version_display))\n break\n print('{} {} [{}] is newer than {} [{}]'.format(md.appstream_id,\n md_newest.version_display,\n md_newest.fw.remote.name,\n md.version_display,\n md.fw.remote.name))\n if not all_newer:\n continue\n\n # delete, but not purge for another 6 months...\n _firmware_delete(fw)\n\n # dedupe emails by user\n emails[fw.user].append(fw)\n\n # send email to the user that uploaded them, unconditionally\n for user in emails:\n send_email(\"[LVFS] Firmware has been obsoleted\",\n user.email_address,\n render_template('email-firmware-obsolete.txt',\n user=user, fws=emails[user]))\n\n # all done\n db.session.commit()\n\ndef _purge_old_deleted_firmware():\n\n # find all unsigned firmware\n for fw in db.session.query(Firmware)\\\n .join(Remote).filter(Remote.name == 'deleted')\\\n .order_by(Firmware.timestamp.asc()):\n if fw.target_duration > datetime.timedelta(days=30*6):\n print('Deleting %s as age %s' % (fw.filename, fw.target_duration))\n path = os.path.join(app.config['RESTORE_DIR'], fw.filename)\n if os.path.exists(path):\n os.remove(path)\n for md in fw.mds:\n for shard in md.shards:\n path = _get_shard_path(shard)\n if os.path.exists(path):\n os.remove(path)\n db.session.delete(fw)\n db.session.commit()\n\ndef _test_priority_sort_func(test):\n plugin = ploader.get_by_id(test.plugin_id)\n if not plugin:\n return 0\n return plugin.priority\n\ndef _yara_query_shard(query, md, shard):\n if not shard.blob:\n return\n matches = query.rules.match(data=shard.blob)\n for match in matches:\n msg = match.rule\n for string in match.strings:\n if len(string) == 3:\n try:\n msg += ': found {}'.format(string[2].decode())\n except 
UnicodeDecodeError as _:\n pass\n query.results.append(YaraQueryResult(md=md, shard=shard, result=msg))\n\n # unallocate the cached blob as it's no longer needed\n shard.blob = None\n\ndef _yara_query_component(query, md):\n if not md.blob:\n return\n matches = query.rules.match(data=md.blob)\n for match in matches:\n msg = match.rule\n for string in match.strings:\n if len(string) == 3:\n try:\n msg += ': found {}'.format(string[2].decode())\n except UnicodeDecodeError as _:\n pass\n query.results.append(YaraQueryResult(md=md, result=msg))\n\n # unallocate the cached blob as it's no longer needed\n md.blob = None\n\ndef _yara_query_all():\n\n # get all pending queries\n pending = db.session.query(YaraQuery).\\\n filter(YaraQuery.started_ts == None).\\\n filter(YaraQuery.error == None)\n if not pending:\n return\n\n # get list of component IDs (as integers)\n component_ids = [x[0] for x in db.session.query(Component.component_id)\\\n .join(Firmware)\\\n .join(Remote)\\\n .filter(Remote.name == 'stable').all()]\n\n for query in pending:\n print('processing query {}: {}...'.format(query.yara_query_id, query.title))\n try:\n query.rules = yara.compile(source=query.value)\n except yara.SyntaxError as e:\n query.error = 'Failed to compile rules: {}'.format(str(e))\n db.session.commit()\n continue\n query.started_ts = datetime.datetime.utcnow()\n db.session.commit()\n for component_id in component_ids:\n md = db.session.query(Component)\\\n .filter(Component.component_id == component_id)\\\n .one()\n for shard in md.shards:\n _yara_query_shard(query, md, shard)\n _yara_query_component(query, md)\n query.total += len(md.shards)\n query.found = len(query.results)\n query.ended_ts = datetime.datetime.utcnow()\n db.session.commit()\n\ndef _check_firmware():\n\n # make a list of the first few tests that need running\n tests = db.session.query(Test)\\\n .filter(Test.started_ts == None)\\\n .order_by(Test.scheduled_ts)\\\n .limit(50).all()\n\n # mark all the tests as started\n for test in tests:\n print('Marking test {} started for firmware {}...'.format(test.plugin_id, test.fw.firmware_id))\n test.started_ts = datetime.datetime.utcnow()\n db.session.commit()\n\n # process each test\n for test in sorted(tests, key=_test_priority_sort_func):\n plugin = ploader.get_by_id(test.plugin_id)\n if not plugin:\n _event_log('No plugin %s' % test.plugin_id)\n test.ended_ts = datetime.datetime.utcnow()\n continue\n try:\n print('Running test {} for firmware {}'.format(test.plugin_id, test.fw.firmware_id))\n if hasattr(plugin, 'run_test_on_fw'):\n if hasattr(plugin, 'require_test_for_fw'):\n if not plugin.require_test_for_fw(test.fw):\n continue\n plugin.run_test_on_fw(test, test.fw)\n if hasattr(plugin, 'run_test_on_md'):\n for md in test.fw.mds:\n if hasattr(plugin, 'require_test_for_md'):\n if not plugin.require_test_for_md(md):\n continue\n plugin.run_test_on_md(test, md)\n test.ended_ts = datetime.datetime.utcnow()\n # don't leave a failed task running\n db.session.commit()\n except Exception as e: # pylint: disable=broad-except\n test.ended_ts = datetime.datetime.utcnow()\n test.add_fail('An exception occurred', str(e))\n\n # all done\n db.session.commit()\n\n\ndef _generate_stats_for_vendor(v, datestr):\n\n # is datestr older than firmware\n if not v.ctime:\n return\n if datestr < _get_datestr_from_datetime(v.ctime - datetime.timedelta(days=1)):\n return\n\n # get all the firmware for a specific vendor\n fw_ids = [fw.firmware_id for fw in v.fws]\n if not fw_ids:\n return\n\n # count how many times any of the 
firmware files were downloaded\n cnt = _execute_count_star(db.session.query(Client).\\\n filter(Client.firmware_id.in_(fw_ids)).\\\n filter(Client.datestr == datestr))\n analytic = AnalyticVendor(vendor_id=v.vendor_id, datestr=datestr, cnt=cnt)\n print('adding %s:%s = %i' % (datestr, v.group_id, cnt))\n db.session.add(analytic)\n\ndef _generate_stats_for_firmware(fw, datestr):\n\n # is datestr older than firmware\n if datestr < _get_datestr_from_datetime(fw.timestamp):\n return\n\n # count how many times any of the firmware files were downloaded\n cnt = _execute_count_star(db.session.query(Client).\\\n filter(Client.firmware_id == fw.firmware_id).\\\n filter(Client.datestr == datestr))\n analytic = AnalyticFirmware(firmware_id=fw.firmware_id, datestr=datestr, cnt=cnt)\n db.session.add(analytic)\n\ndef _demote_back_to_testing(fw):\n\n # from the server admin\n user = db.session.query(User).filter(User.username == 'anon@fwupd.org').first()\n if not user:\n return\n\n # send email to uploading user\n if fw.user.get_action('notify-demote-failures'):\n send_email(\"[LVFS] Firmware has been demoted\",\n fw.user.email_address,\n render_template('email-firmware-demote.txt',\n user=fw.user, fw=fw))\n\n fw.mark_dirty()\n remote = db.session.query(Remote).filter(Remote.name == 'testing').first()\n remote.is_dirty = True\n fw.remote_id = remote.remote_id\n fw.events.append(FirmwareEvent(remote_id=fw.remote_id, user_id=user.user_id))\n db.session.commit()\n _event_log('Demoted firmware {} as reported success {}%'.format(fw.firmware_id, fw.success))\n\ndef _generate_stats_firmware_reports(fw):\n\n # count how many times any of the firmware files were downloaded\n reports_success = 0\n reports_failure = 0\n reports_issue = 0\n for r in db.session.query(Report).\\\n filter(Report.firmware_id == fw.firmware_id,\n Report.timestamp > datetime.date.today() - datetime.timedelta(weeks=26)):\n if r.state == 2:\n reports_success += 1\n if r.state == 3:\n if r.issue_id:\n reports_issue += 1\n else:\n reports_failure += 1\n\n # update\n fw.report_success_cnt = reports_success\n fw.report_failure_cnt = reports_failure\n fw.report_issue_cnt = reports_issue\n\n # check the limits and demote back to embargo if required\n if fw.remote.name == 'stable' and fw.is_failure:\n _demote_back_to_testing(fw)\n\ndef _get_app_from_ua(ua):\n # always exists\n return ua.split(' ')[0]\n\ndef _get_fwupd_from_ua(ua):\n for part in ua.split(' '):\n if part.startswith('fwupd/'):\n return part[6:]\n return 'Unknown'\n\ndef _get_lang_distro_from_ua(ua):\n start = ua.find('(')\n end = ua.rfind(')')\n if start == -1 or end == -1:\n return None\n parts = ua[start+1:end].split('; ')\n if len(parts) != 3:\n return None\n return (parts[1], parts[2])\n\ndef _generate_stats_shard_info(info):\n\n cnt = db.session.query(ComponentShard.component_shard_id)\\\n .filter(ComponentShard.guid == info.guid)\\\n .count()\n if info.cnt != cnt:\n print('fixing ComponentShardInfo %i: %i -> %i' % (info.component_shard_info_id, info.cnt, cnt))\n info.cnt = cnt\n\ndef _generate_stats(kinds=None):\n if not kinds:\n kinds = ['FirmwareReport', 'ShardCount', 'ShardInfo', 'Metrics']\n\n # Set ComponentShardInfo in ComponentShard if GUID matches\n if 'Metrics' in kinds:\n print('stats::Metrics')\n values = {}\n values['ClientCnt'] = _execute_count_star(\\\n db.session.query(Client))\n values['FirmwareCnt'] = _execute_count_star(\\\n db.session.query(Firmware))\n values['FirmwareStableCnt'] = _execute_count_star(\\\n db.session.query(Firmware)\\\n .join(Remote)\\\n 
.filter(Remote.name == 'stable'))\n values['FirmwareTestingCnt'] = _execute_count_star(\\\n db.session.query(Firmware)\\\n .join(Remote)\\\n .filter(Remote.name == 'testing'))\n values['FirmwarePrivateCnt'] = _execute_count_star(\\\n db.session.query(Firmware)\\\n .join(Remote)\\\n .filter(Remote.is_public == False))\n values['TestCnt'] = _execute_count_star(\\\n db.session.query(Test))\n values['ReportCnt'] = _execute_count_star(\\\n db.session.query(Report))\n values['ProtocolCnt'] = _execute_count_star(\\\n db.session.query(Protocol))\n values['ComponentShardInfoCnt'] = _execute_count_star(\\\n db.session.query(ComponentShardInfo))\n values['ComponentShardCnt'] = _execute_count_star(\\\n db.session.query(ComponentShard))\n values['ComponentCnt'] = _execute_count_star(\\\n db.session.query(Component))\n values['VendorCnt'] = _execute_count_star(\\\n db.session.query(Vendor)\\\n .filter(Vendor.visible)\\\n .filter(Vendor.username_glob != None))\n values['UserCnt'] = _execute_count_star(\\\n db.session.query(User)\\\n .filter(User.auth_type != 'disabled'))\n\n # save to database\n for key in values:\n metric = db.session.query(Metric).filter(Metric.key == key).first()\n if not metric:\n metric = Metric(key=key)\n db.session.add(metric)\n metric.value = values[key]\n print('{}={}'.format(metric.key, metric.value))\n db.session.commit()\n\n if 'ShardInfo' in kinds:\n print('stats::ShardInfo')\n infos = {}\n for info in db.session.query(ComponentShardInfo):\n infos[info.guid] = info\n for component_shard_id, in db.session.query(ComponentShard.component_shard_id).\\\n filter(ComponentShard.component_shard_info_id == None):\n shard = db.session.query(ComponentShard).\\\n filter(ComponentShard.component_shard_id == component_shard_id).one()\n shard.info = infos.get(shard.guid)\n if shard.info:\n print('fixing shard {} with {}'.format(component_shard_id, shard.guid))\n else:\n print('creating ComponentShardInfo for {}'.format(shard.guid))\n shard.info = ComponentShardInfo(guid=shard.guid)\n infos[shard.guid] = shard.info\n db.session.commit()\n\n # update ComponentShardInfo.cnt\n if 'ShardCount' in kinds:\n print('stats::ShardCount')\n for info_id, in db.session.query(ComponentShardInfo.component_shard_info_id)\\\n .order_by(ComponentShardInfo.component_shard_info_id.asc()):\n info = db.session.query(ComponentShardInfo)\\\n .filter(ComponentShardInfo.component_shard_info_id == info_id)\\\n .one()\n _generate_stats_shard_info(info)\n db.session.commit()\n\n # update FirmwareReport counts\n if 'FirmwareReport' in kinds:\n print('stats::FirmwareReport')\n for fw in db.session.query(Firmware)\\\n .join(Remote).filter(Remote.name != 'deleted'):\n _generate_stats_firmware_reports(fw)\n db.session.commit()\n\n print('generated %s' % ','.join(kinds))\n\ndef _generate_stats_for_datestr(datestr, kinds=None):\n\n if not kinds:\n kinds = ['Analytic',\n 'AnalyticVendor',\n 'AnalyticFirmware',\n 'Useragent']\n\n # update AnalyticVendor\n if 'AnalyticVendor' in kinds:\n for analytic in db.session.query(AnalyticVendor).filter(AnalyticVendor.datestr == datestr):\n db.session.delete(analytic)\n db.session.commit()\n for v in db.session.query(Vendor):\n _generate_stats_for_vendor(v, datestr)\n db.session.commit()\n\n # update AnalyticFirmware\n if 'AnalyticFirmware' in kinds:\n for analytic in db.session.query(AnalyticFirmware).filter(AnalyticFirmware.datestr == datestr):\n db.session.delete(analytic)\n db.session.commit()\n for fw in db.session.query(Firmware)\\\n .join(Remote).filter(Remote.name != 'deleted'):\n 
_generate_stats_for_firmware(fw, datestr)\n db.session.commit()\n\n # update Useragent\n if 'Useragent' in kinds:\n for agnt in db.session.query(Useragent).filter(Useragent.datestr == datestr):\n db.session.delete(agnt)\n db.session.commit()\n ua_apps = {}\n ua_fwupds = {}\n ua_distros = {}\n ua_langs = {}\n clients = db.session.query(Client.user_agent).\\\n filter(Client.datestr == datestr).all()\n for res in clients:\n ua = res[0]\n if not ua:\n continue\n\n # downloader app\n ua_app = _get_app_from_ua(ua)\n if ua_app not in ua_apps:\n ua_apps[ua_app] = 1\n else:\n ua_apps[ua_app] += 1\n\n # fwupd version\n ua_fwupd = _get_fwupd_from_ua(ua)\n if ua_fwupd not in ua_fwupds:\n ua_fwupds[ua_fwupd] = 1\n else:\n ua_fwupds[ua_fwupd] += 1\n\n # language and distro\n ua_lang_distro = _get_lang_distro_from_ua(ua)\n if ua_lang_distro:\n ua_lang = ua_lang_distro[0]\n ua_distro = ua_lang_distro[1]\n if ua_lang not in ua_langs:\n ua_langs[ua_lang] = 1\n else:\n ua_langs[ua_lang] += 1\n if ua_distro not in ua_distros:\n ua_distros[ua_distro] = 1\n else:\n ua_distros[ua_distro] += 1\n for ua in ua_apps:\n db.session.add(Useragent(kind=int(UseragentKind.APP), value=ua, datestr=datestr, cnt=ua_apps[ua]))\n for ua in ua_fwupds:\n db.session.add(Useragent(kind=int(UseragentKind.FWUPD), value=ua, datestr=datestr, cnt=ua_fwupds[ua]))\n for ua in ua_langs:\n db.session.add(Useragent(kind=int(UseragentKind.LANG), value=ua, datestr=datestr, cnt=ua_langs[ua]))\n for ua in ua_distros:\n db.session.add(Useragent(kind=int(UseragentKind.DISTRO), value=ua, datestr=datestr, cnt=ua_distros[ua]))\n db.session.commit()\n\n # update Analytic\n if 'Analytic' in kinds:\n analytic = db.session.query(Analytic).filter(Analytic.datestr == datestr).first()\n if analytic:\n db.session.delete(analytic)\n db.session.commit()\n db.session.add(Analytic(datestr=datestr, cnt=len(clients)))\n db.session.commit()\n\n # for the log\n print('generated for %s: %s' % (datestr, ','.join(kinds)))\n\ndef _main_with_app_context():\n if 'repair-ts' in sys.argv:\n _repair_ts()\n if 'repair-csum' in sys.argv:\n _repair_csum()\n if 'fsck' in sys.argv:\n _fsck()\n if 'ensure' in sys.argv:\n _ensure_tests()\n if 'firmware' in sys.argv:\n _regenerate_and_sign_firmware()\n if 'metadata' in sys.argv:\n _regenerate_and_sign_metadata()\n if 'metadata-embargo' in sys.argv:\n _regenerate_and_sign_metadata(only_embargo=True)\n if 'purgedelete' in sys.argv:\n _delete_embargo_obsoleted_fw()\n _purge_old_deleted_firmware()\n if 'fwchecks' in sys.argv:\n _check_firmware()\n _yara_query_all()\n if 'stats' in sys.argv:\n val = _get_datestr_from_datetime(datetime.date.today() - datetime.timedelta(days=1))\n _generate_stats_for_datestr(val)\n _generate_stats()\n if 'statsmigrate' in sys.argv:\n for days in range(1, 720):\n val = _get_datestr_from_datetime(datetime.date.today() - datetime.timedelta(days=days))\n _generate_stats_for_datestr(val)\n\nif __name__ == '__main__':\n\n if len(sys.argv) < 2:\n print('Usage: %s [metadata] [firmware]' % sys.argv[0])\n sys.exit(1)\n try:\n with app.test_request_context():\n app.config['SERVER_NAME'] = app.config['HOST_NAME']\n g.user = db.session.query(User).filter(User.username == 'anon@fwupd.org').first()\n _main_with_app_context()\n except NotImplementedError as e:\n print(str(e))\n sys.exit(1)\n\n # success\n 
sys.exit(0)\n","repo_name":"0xfede7c8/lvfs-website","sub_path":"cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":33038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"40250668467","text":"import numpy as np\nfrom keras.utils import plot_model\nfrom keras.applications.imagenet_utils import _obtain_input_shape\nfrom keras.engine.topology import get_source_inputs\nfrom keras.layers import Input, Conv2D, MaxPool2D, GlobalMaxPooling2D, GlobalAveragePooling2D\nfrom keras.layers import Activation, Dense,Lambda,BatchNormalization,Concatenate\nfrom mobilenet_v2 import DepthwiseConv2D\nfrom keras.models import Model\nimport keras.backend as K\nimport os\n\n\n\ndef channel_split(x, name=''):\n # equipartition\n in_channles = x.shape.as_list()[-1]\n ip = in_channles // 2\n c_hat = Lambda(lambda z: z[:, :, :, 0:ip], name='%s/sp%d_slice' % (name, 0))(x)\n c = Lambda(lambda z: z[:, :, :, ip:], name='%s/sp%d_slice' % (name, 1))(x)\n return c_hat, c\n\ndef channel_shuffle(x):\n height, width, channels = x.shape.as_list()[1:]\n channels_per_split = channels // 2\n x = K.reshape(x, [-1, height, width, 2, channels_per_split])\n x = K.permute_dimensions(x, (0,1,2,4,3))\n x = K.reshape(x, [-1, height, width, channels])\n return x\n\ndef shuffle_unit(inputs, out_channels, bottleneck_ratio,strides=2,stage=1,block=1):\n if K.image_data_format() == 'channels_last':\n bn_axis = -1\n else:\n raise ValueError('Only channels last supported')\n\n prefix = 'stage{}/block{}'.format(stage, block)\n bottleneck_channels = int(out_channels * bottleneck_ratio)\n if strides < 2:\n c_hat, c = channel_split(inputs, '{}/spl'.format(prefix))\n inputs = c\n\n x = Conv2D(bottleneck_channels, kernel_size=(1,1), strides=1, padding='same', name='{}/1x1conv_1'.format(prefix))(inputs)\n x = BatchNormalization(axis=bn_axis, name='{}/bn_1x1conv_1'.format(prefix))(x)\n x = Activation('relu', name='{}/relu_1x1conv_1'.format(prefix))(x)\n x = DepthwiseConv2D(kernel_size=3, strides=strides, padding='same', name='{}/3x3dwconv'.format(prefix))(x)\n x = BatchNormalization(axis=bn_axis, name='{}/bn_3x3dwconv'.format(prefix))(x)\n x = Conv2D(bottleneck_channels, kernel_size=1,strides=1,padding='same', name='{}/1x1conv_2'.format(prefix))(x)\n x = BatchNormalization(axis=bn_axis, name='{}/bn_1x1conv_2'.format(prefix))(x)\n x = Activation('relu', name='{}/relu_1x1conv_2'.format(prefix))(x)\n\n if strides < 2:\n ret = Concatenate(axis=bn_axis, name='{}/concat_1'.format(prefix))([x, c_hat])\n else:\n s2 = DepthwiseConv2D(kernel_size=3, strides=2, padding='same', name='{}/3x3dwconv_2'.format(prefix))(inputs)\n s2 = BatchNormalization(axis=bn_axis, name='{}/bn_3x3dwconv_2'.format(prefix))(s2)\n s2 = Conv2D(bottleneck_channels, kernel_size=1,strides=1,padding='same', name='{}/1x1_conv_3'.format(prefix))(s2)\n s2 = BatchNormalization(axis=bn_axis, name='{}/bn_1x1conv_3'.format(prefix))(s2)\n s2 = Activation('relu', name='{}/relu_1x1conv_3'.format(prefix))(s2)\n ret = Concatenate(axis=bn_axis, name='{}/concat_2'.format(prefix))([x, s2])\n\n ret = Lambda(channel_shuffle, name='{}/channel_shuffle'.format(prefix))(ret)\n\n return ret\n\ndef block(x, channel_map, bottleneck_ratio, repeat=1, stage=1):\n x = shuffle_unit(x, out_channels=channel_map[stage-1],\n strides=2,bottleneck_ratio=bottleneck_ratio,stage=stage,block=1)\n\n for i in range(1, repeat+1):\n x = shuffle_unit(x, out_channels=channel_map[stage-1],strides=1,\n bottleneck_ratio=bottleneck_ratio,stage=stage, block=(1+i))\n\n return 
x\n\nclass ShuffleNetV2():\n\n @staticmethod\n def ShuffleNetV2(input_shape,classes=100,weights=\"trained_model/shufflenetv2.hdf5\"):\n\n img_input = Input(shape=input_shape)\n\n out_dim_stage_two = {0.5:48, 1:116, 1.5:176, 2:244}\n bottleneck_ratio=1\n num_shuffle_units=[3,7,3]\n\n\n exp = np.insert(np.arange(len([3,7,3]), dtype=np.float32), 0, 0) # [0., 0., 1., 2.]\n out_channels_in_stage = 2**exp\n out_channels_in_stage *= out_dim_stage_two[1] # calculate output channels for each stage\n out_channels_in_stage[0] = 24 # first stage has always 24 output channels\n out_channels_in_stage *= 1.0\n out_channels_in_stage = out_channels_in_stage.astype(int)\n\n # create shufflenet architecture\n x = Conv2D(filters=out_channels_in_stage[0], kernel_size=(3, 3), padding='same', use_bias=False, strides=(2, 2),\n activation='relu', name='conv1')(img_input)\n x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same', name='maxpool1')(x)\n\n # create stages containing shufflenet units beginning at stage 2\n for stage in range(len(num_shuffle_units)):\n repeat = num_shuffle_units[stage]\n x = block(x, out_channels_in_stage,\n repeat=repeat,\n bottleneck_ratio=bottleneck_ratio,\n stage=stage + 2)\n\n if bottleneck_ratio < 2:\n k = 1024\n else:\n k = 2048\n x = Conv2D(k, kernel_size=1, padding='same', strides=1, name='1x1conv5_out', activation='relu')(x)\n\n x = GlobalAveragePooling2D(name='global_avg_pool')(x)\n x = Dense(classes, name='fc')(x)\n x = Activation('softmax', name='softmax')(x)\n\n model = Model(img_input, x, name='ShuffleNetV2')\n\n if os.path.isfile(weights):\n model.load_weights(weights)\n print(\"Model loaded\")\n else:\n print(\"No model is found\")\n\n return model","repo_name":"w5688414/keras-aichallenger-2018-plant-recognition","sub_path":"shufflenetv2.py","file_name":"shufflenetv2.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"23948310912","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nimport os\n# Modulos de pyqt5\nfrom PyQt5.Qt import Qt\nfrom PyQt5.QtCore import QSize, QPointF, QPoint, QEvent, Qt as QtCore,\\\n pyqtSignal\nfrom PyQt5.QtWidgets import (QMainWindow, QApplication, QFrame, QLabel,\n QSizePolicy, QGraphicsDropShadowEffect, QSpacerItem,\n QDesktopWidget, QWidget, QHBoxLayout)\nfrom PyQt5.QtGui import QPixmap, QIcon, QFont, QColor, QCursor\n# Modulos para el scraping\nfrom bs4 import BeautifulSoup\nfrom requests import get\n# Para obtener applicacion random\nfrom random import randint\n# Obtener ruta variable de las imgs\nfrom os.path import join, abspath, dirname\n# Guis o modulos locales\nfrom deepinesStore.maing import Ui_MainWindow\nfrom deepinesStore.cardg import Ui_Frame\nfrom deepinesStore.dialog_install import Ui_Form as DInstall\nfrom deepinesStore.about import Dialog as DAbout\n\n# Variables globales\nglobal lista_app, total_apps, lista_inicio, lista_global, lista_selected\nglobal selected_apps, instaladas, columnas, tamanio, repo, contador_selected\n\nclass Ventana(QMainWindow):\n def __init__(self):\n super(Ventana, self).__init__()\n # Inicializamos la gui\n self.ui = Ui_MainWindow(width, height)\n self.ui.setupUi(self)\n self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)\n self.setAttribute(Qt.WA_TranslucentBackground, True )\n self.lista_deepines = ['conkys-widgets', 'deepin-lenguage-patch-es',\n 'dexter-icon-theme', 'frases-celebres', 'firefox-latest',\n 
'laboon-access','marea-icon-theme','telegram-desktop',\n 'thunderbird-latest','deepin-language-patch-es']\n \n self.lista_excluir = self.Get_App_Exclude()\n\n global lista_app, selected_apps, instaladas,\\\n lista_global, repo, lista_selected, contador_selected\n repo = self.repo_is_exist()\n if repo:\n # Variables globales\n selected_apps = list()\n lista_selected = {}\n contador_selected = 0\n instaladas = self.apps_instaladas()\n # Almacenamos la lista, para cargarla solo al inicio\n lista_app = self.Get_App()\n if lista_app:\n # Obtenemos aplicaciones para la lista de apps\n self.Apps_inicio(lista_app)\n \n else:\n self.error(\"No ha podido establecer conexión con el servidor, \"\n \"por favor verifique su conexión de internet. \"\n \"Si el problema persiste, contáctenos vía Telegram \"\n \"en @deepinenespanol. \"\n \" deepinenespañol.org | Copiar enlace \"\n \"Visite Deepin en Español para más información.\",\n \"https://deepinenespañol.org\")\n else:\n self.error(\"El repositorio Deepines no está instalado en su sistema, \"\n \"Tienda Deepines necesita este repositorio para funcionar. \"\n \"En el siguiente enlace encontrará las instrucciones para instalarlo. \"\n \"deepinenespañol.org/repositorio/ | Copiar enlace
    \",\n \"https://deepinenespañol.org/repositorio\")\n\n self.ui.btn_install.setEnabled(False)\n self.ui.btn_install.clicked.connect(self.ventana_install)\n self.ui.lbl_list_apps.setText(\"Seleccione las aplicaciones a instalar\")\n self.ui.lbl_list_apps.setEnabled(False)\n self.ui.icon_car.clicked.connect(self.apps_seleccionadas)\n self.ui.lbl_list_apps.clicked.connect(self.apps_seleccionadas)\n self.ui.listWidget.itemClicked.connect(self.listwidgetclicked)\n self.ui.lineEdit.textChanged.connect(self.search_app)\n self.ui.label_2.clicked.connect(self.acerca_de)\n self.ui.btn_cerrar.clicked.connect(self.close)\n self.ui.btn_maximizar.clicked.connect(self.maximize)\n self.ui.btn_minimizar.clicked.connect(self.minimize)\n self.ui.widget_1.installEventFilter(self)\n self.ui.label.clicked.connect(self.acerca_de)\n shadow = QGraphicsDropShadowEffect(self,\n blurRadius=10,\n color=QColor(255,255,255),\n offset=QPointF(0, 0)\n )\n shadow.setXOffset(0)\n shadow.setYOffset(0)\n self.ui.btn_install.setGraphicsEffect(shadow)\n \n self.center()\n\n\n \n ################################################\n # Repo en sistema #\n\n def repo_is_exist(self):\n if os.path.exists(\"/etc/apt/sources.list.d/deepines.list\"):\n return True\n else:\n return False\n \n # /Repo en sistema #\n ################################################\n\n ################################################\n # Control de errores #\n\n def error(self, text: str, enlace: str):\n self.horizontalLayout = QHBoxLayout()\n self.horizontalLayout.setContentsMargins(0, 40, 0, 0)\n self.horizontalLayout.setSpacing(0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n \n self.raccoon = QLabel(self)\n self.raccoon.setText(\"\")\n self.raccoon.setMinimumSize(300, 300)\n self.raccoon.setMaximumSize(300, 300)\n self.raccoon.setObjectName(\"raccoon\")\n self.raccoon.setStyleSheet(\"#raccoon{\"\n \"background-color: transparent;}\")\n\n ruta = abspath(join(dirname(__file__), 'resources', 'raccoon.svg'))\n pixmap = QPixmap(ruta)\n self.raccoon.setPixmap(pixmap)\n self.horizontalLayout.addWidget(self.raccoon)\n self.ui.gridLayout.addLayout(self.horizontalLayout, 1, 1, 1, 1)\n \n\n self.label_error = QLabelClickable(self)\n sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_error.sizePolicy().hasHeightForWidth())\n self.label_error.setSizePolicy(sizePolicy)\n font = QFont()\n font.setPointSize(16)\n self.text = (\"\"\" \n
    \"\"\"\n + text +\n \"\"\"

    \"\"\")\n self.label_error.setFont(font)\n self.label_error.setScaledContents(True)\n self.label_error.setText(self.text)\n self.label_error.setStyleSheet(\"background-color: transparent;\\n\"\n \"color: white;\")\n self.label_error.setEnabled(True)\n self.label_error.setAlignment(QtCore.AlignCenter)\n self.label_error.setObjectName(\"label_error\")\n self.label_error.clicked.connect(lambda:\n QApplication.clipboard().setText(enlace))\n self.ui.gridLayout.addWidget(self.label_error, 2, 1, 1, 1)\n self.ui.listWidget.setEnabled(False)\n self.ui.frame_4.setEnabled(False)\n \n # /Control de errores #\n ################################################\n\n def resizeEvent(self, event):\n\n if repo and lista_app:\n self.Listar_Apps(lista_global)\n\n ################################################\n # Busqueda de apps #\n\n def search_app(self):\n text = self.ui.lineEdit.text()\n\n lista_search = {}\n contador = 0\n global lista_global\n if len(text) != 0 and len(text) > 2:\n self.ui.listWidget.clearSelection()\n for elemento in lista_app:\n if elemento[0] not in self.lista_excluir and elemento[0].startswith(text) or text in elemento[1]:\n indice = lista_app.index(elemento)\n item = lista_app[indice]\n lista_search[contador] = item\n contador += 1\n else:\n lista_global = lista_search\n else:\n lista_global = lista_inicio\n self.Listar_Apps(lista_global)\n \n def clear_search_txt(self):\n self.ui.lineEdit.setText(\"\")\n\n # /Busqueda de apps #\n ################################################\n\n ################################################\n # Filtro de apps #\n\n def listwidgetclicked(self, item):\n filtro = list() # Limpiamos la lista\n global lista_global\n\n if item.text() == \"Inicio\":\n self.Listar_Apps(lista_inicio)\n filtro.append(\"inicio\")\n elif item.text() == \"Deepines\":\n filtro.append(\"deepines\")\n elif item.text() == \"Internet\":\n filtro.append(\"web\")\n filtro.append(\"net\")\n filtro.append(\"mail\")\n filtro.append(\"networking\")\n filtro.append(\"network\")\n elif item.text() == \"Multimedia\":\n filtro.append(\"sound\")\n filtro.append(\"audio\")\n filtro.append(\"video\")\n elif item.text() == \"Gráficos\":\n filtro.append(\"graphics\")\n filtro.append(\"media\")\n elif item.text() == \"Juegos\":\n filtro.append(\"games\")\n elif item.text() == \"Ofimática\":\n filtro.append(\"editors\")\n elif item.text() == \"Desarrollo\":\n filtro.append(\"devel\")\n filtro.append(\"shells\")\n elif item.text() == \"Sistema\":\n filtro.append(\"admin\")\n filtro.append(\"python\")\n elif item.text() == \"Otros\":\n filtro.append(\"otros\")\n \n\n if \"inicio\" not in filtro:\n global lista_global\n lista_global = self.Get_App_Filter(lista_app, filtro)\n self.Listar_Apps(lista_global)\n else:\n lista_global = lista_inicio\n\n self.clear_search_txt()\n\n # /Filtro de apps #\n ################################################\n\n ################################################\n # Lista de apps #\n\n # Obtener lista de apps #\n def Get_App(self):\n \n # Asignamos la url\n URL = \"http://repositorio.deepines.com/pub/deepines/4/paquetes.html\"\n \n try:\n # Realizamos la petición a la web\n req = get(URL, timeout=10)\n\n # Comprobamos que la petición nos devuelve un Status Code = 200\n status_code = req.status_code\n if status_code == 200:\n\n # Pasamos el contenido HTML de la web a un objeto BeautifulSoup()\n html = BeautifulSoup(req.text, \"html.parser\")\n\n # Obtenemos todos los divs donde están las entradas\n entradas = html.find_all('tr')\n \n lista = list()\n global 
total_apps\n total_apps = 0\n # Recorremos todas las entradas para extraer el título, autor y fecha\n for i, entrada in enumerate(entradas):\n # Con el método \"getText()\" no nos devuelve el HTML\n titulo = entrada.find('td', {'class': 'package'}).getText()\n descripcion = entrada.find('td', {'class': 'description'}).getText()\n version = entrada.find('td', {'class': 'version'}).getText()\n categoria = entrada.find('td', {'class': 'section'}).getText()\n estado = 1\n\n if titulo not in self.lista_excluir:\n lista_origen = [titulo, descripcion, version, categoria, estado]\n lista.append(lista_origen)\n \n total_apps += 1\n\n return lista\n except:\n pass\n\n # Filtrar aplicaciones #\n def Get_App_Filter(self, lista_app, filtro):\n lista_filtrada = {}\n contador = 0\n filtros = ['web','net','mail','sound','audio','video',\n 'graphics','media','games','editors','devel','shell',\n 'admin','python','network','networking']\n if 'deepines' in filtro:\n for app in self.lista_deepines:\n for elemento in lista_app:\n if elemento[0] == app :\n lista_filtrada[contador] = elemento\n contador += 1\n else:\n if \"otros\" not in filtro:\n for elemento in lista_app:\n categoria_app = elemento[3].lower().split(\"/\")\n for filtro_uno in categoria_app:\n if filtro_uno in filtro:\n lista_filtrada[contador] = elemento\n contador += 1\n else:\n for elemento in lista_app:\n if elemento[3].lower() not in filtros:\n lista_filtrada[contador] = elemento\n contador += 1\n\n return lista_filtrada\n \n # Aplicaciones Inicio #\n def Apps_inicio(self, lista_app):\n global total_apps, lista_inicio, lista_global\n lista_inicio = {}\n lista_key = []\n contador = True\n while contador:\n if len(lista_key) == 8:\n contador = False\n else:\n key = randint(0, (total_apps-1))\n if key not in lista_key:\n lista_key.append(key)\n\n contador = 0\n for key in lista_key:\n lista_inicio[contador] = lista_app[key]\n contador += 1\n\n lista_global = lista_inicio\n\n # Listar aplicaciones #\n def Listar_Apps(self, lista):\n equal = lista_inicio == lista_global\n if equal: \n item = self.ui.listWidget.item(0)\n item.setSelected(True)\n\n while self.ui.gridLayout.count():\n item = self.ui.gridLayout.takeAt(0)\n widget = item.widget()\n if widget:\n widget.deleteLater()\n\n y = 0 # Creamos la coordenada y\n x = 0 # Creamos la coordenada x \n # Estas para establecer la ubicacion de la tarjetas en la grilla\n i = 0\n self.calcular_columnas()\n for key in lista: # Recorremos la lista con los elementos\n i += 1 # Contador para agregar el espaciador horizontal\n # Consultamos si ya tenemos tres tarjetas en y\n if y % columnas == 0 and y != 0:\n y = 0 # Reiniciamos y\n x += 1 # Agregamos otra columna\n y += 1 # agregamos 1 a la coordenada y\n\n # Creamos una instancia de la clase card\n carta = Card(lista[key][0], lista[key][1], lista[key][2], lista[key][4], self)\n # Agregamos dicha instancia a la grilla\n self.ui.gridLayout.addWidget(carta, x, y, 1, 1)\n self.ui.frame.verticalScrollBar().setSliderPosition(0)\n\n # Espaciador vertical\n spacerItem9 = QSpacerItem(0, 0, QSizePolicy.Minimum, QSizePolicy.Expanding)\n self.ui.gridLayout.addItem(spacerItem9, (x+1), 1, 1, 1)\n\n # Si tenemos menos apps que las columnas, agregamos el espaciador\n if i < columnas:\n # Espaciador horizontal\n spacerItem8 = QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum)\n self.ui.gridLayout.addItem(spacerItem8, x, columnas, 1, 10)\n\n # Lista aplicaciones excluidas #\n def Get_App_Exclude(self):\n lista = list()\n ruta_omitidos = 
abspath(join(dirname(__file__), 'omitidos.txt'))\n excluidos = open(ruta_omitidos, 'r')\n\n for line in excluidos:\n line = line.replace('\\n', '')\n lista.append(line)\n \n return lista\n\n # /Lista de apps #\n ################################################\n\n ################################################\n # Calcular columnas #\n def calcular_columnas(self):\n if width < 1360:\n base = 180\n elif width >= 1360 and width < 2500:\n base = 210\n elif width > 2500:\n base = 420\n\n ancho = self.ui.frame.frameGeometry().width()\n global columnas, tamanio\n \n if ancho < 700: ancho = ancho_inicio\n \n columnas = ancho // (base + 40)\n restante = ancho % (base + 40)\n tamanio = base + (restante // columnas)\n\n def calcular_anchos(self):\n width_screen = int(width * 0.7)\n if width_screen < 945: width_screen = 945\n\n size_frame = int(width * 0.14)\n if size_frame < 200: size_frame = 200\n if size_frame > 300: size_frame = 300\n global ancho_inicio\n ancho_inicio = width_screen - size_frame\n\n\n # /Calcular columnas #\n ################################################\n\n def contar_apps(self):\n global selected_apps\n cuenta = len(selected_apps)\n if cuenta == 0:\n texto = \"Seleccione las aplicaciones a instalar\"\n borde = \"border: 2px solid rgb(45, 45, 45);\"\n r, g, b = 255, 255, 255\n cursor = QtCore.ArrowCursor\n enabled = False\n pix_car = \"carDisable.svg\"\n else:\n borde = \"border: 2px solid #419fd9;\"\n r, g, b = 0, 255, 255\n cursor = QtCore.PointingHandCursor\n enabled = True\n pix_car = \"carEnable.svg\"\n\n if cuenta != 1:\n acentuacion, articulo, plural = \"o\", \"es\", \"s\"\n else:\n acentuacion, articulo, plural = \"ó\", \"\", \"\"\n texto = \"{} aplicaci{}n{} seleccionada{} para instalar, clic aquí para verla{}\".format(\n cuenta, acentuacion, articulo, plural, plural)\n\n self.ui.btn_install.setEnabled(enabled)\n self.ui.lbl_list_apps.setEnabled(enabled)\n self.ui.lbl_list_apps.setCursor(QCursor(cursor))\n \n pix_car = QPixmap(abspath(join(dirname(__file__), 'resources', pix_car)))\n self.ui.icon_car.setPixmap(pix_car)\n\n estilo = (\"#btn_install{\\n\"\n \"color: #fff;\\n\"\n \"padding: 2px;\\n\"\n \"border-radius: 5px;\\n\"\n \"background-color: rgb(45, 45, 45);\\n\"\n + borde +\n \"}\\n\"\n \"#btn_install:hover{\\n\"\n \"padding: 2px;\\n\"\n \"color:white;\\n\"\n \"background-color: rgb(65, 159, 217);\\n\"\n \"border: 1px solid rgb(142, 231, 255);\\n\"\n \"border-radius: 5px;\\n\"\n \"}\")\n self.ui.btn_install.setStyleSheet(estilo)\n\n shadow = QGraphicsDropShadowEffect(self,\n blurRadius=10,\n color=QColor(r,g,b),\n offset=QPointF(0, 0)\n )\n shadow.setXOffset(0)\n shadow.setYOffset(0)\n self.ui.btn_install.setGraphicsEffect(shadow)\n\n\n self.ui.lbl_list_apps.setText(texto)\n\n ################################################\n # Instalacion #\n\n def ventana_install(self):\n global selected_apps\n self.modal = DInstall(self, selected_apps)\n self.modal.show()\n\n # /Instalacion #\n ################################################\n\n ################################################\n # Acerca de #\n\n def acerca_de(self):\n self.modal = DAbout(self)\n self.modal.show()\n\n # /cerca de #\n ################################################\n\n ################################################\n # Centrar #\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n #mover = QPoint(self.x(), 100)\n self.move(qr.topLeft())\n #self.move(self.x(), self.y() - mover.y())\n\n # /Centrar #\n 
################################################\n\n ################################################\n # Apps Instaladas #\n\n def apps_instaladas(self):\n comando = \"dpkg --get-selections | grep -w install\"\n lista = os.popen(comando)\n instaladas = list()\n for linea in lista.readlines():\n linea = ''.join(linea.split())\n linea = linea.replace(\"install\", \"\")\n instaladas.append(linea)\n\n return(instaladas)\n \n\n # /Apps Instaladas #\n ################################################\n\n ################################################\n # Apps nuevas Instaladas #\n\n def instalacion_completada(self):\n global selected_apps, instaladas\n lista_complete = {}\n contador = 0\n for app in selected_apps:\n for elemento in lista_app:\n if elemento[0] == app: \n indice = lista_app.index(elemento)\n item = lista_app[indice]\n lista_complete[contador] = item\n contador += 1\n \n instaladas.append(app)\n selected_apps = list()\n self.Listar_Apps(lista_complete)\n self.contar_apps()\n\n\n # /Apps nuevas Instaladas #\n ################################################\n\n\n def eventFilter(self, obj, event):\n if event.type() == QEvent.MouseButtonPress:\n self.oldPos = event.globalPos()\n elif event.type() == QEvent.MouseMove:\n delta = QPoint(event.globalPos() - self.oldPos)\n self.move(self.x() + delta.x(), self.y() + delta.y())\n self.oldPos = event.globalPos()\n \n return True\n\n\n def maximize(self):\n global maximized\n if maximized: # Restauramos al tamaño original\n self.setWindowState(Qt.WindowNoState)\n maximized = False\n icono = abspath(join(dirname(__file__), 'resources', 'maximizar.svg'))\n self.ui.widget_1.installEventFilter(self)\n else: # Agrandamos la ventana\n self.setWindowState(Qt.WindowMaximized)\n maximized = True\n icono = abspath(join(dirname(__file__), 'resources', 'restaurar.svg'))\n self.ui.widget_1.removeEventFilter(self)\n\n # Cambio de icono al maximizar\n icon1 = QIcon()\n icon1.addPixmap(QPixmap(icono), QIcon.Normal, QIcon.Off)\n self.ui.btn_maximizar.setIcon(icon1)\n\n\n def minimize(self):\n global maximized\n # No se ve la app, agrandamos\n self.setWindowState(Qt.WindowMinimized)\n if maximized: # Si estaba maximizada, agrandamos\n self.setWindowState(Qt.WindowMaximized)\n\n def apps_seleccionadas(self):\n self.Listar_Apps(lista_selected)\n self.ui.listWidget.clearSelection()\n\n\nclass QLabelClickable(QLabel):\n\n clicked = pyqtSignal()\n \n def __init__(self, *args):\n QLabel.__init__(self, *args)\n \n def mouseReleaseEvent(self, ev):\n self.clicked.emit()\n\n################################################\n# Card para la aplicacion #\n\nclass Card(QFrame):\n def __init__(self, titulo: str, descripcion: str, version: str, estado: int, parent):\n super(Card, self).__init__()\n self.parentWindow = parent\n self.cd = Ui_Frame()\n self.cd.setupUi(self)\n # Establecemos los atributos de la app\n #self.cd.btn_select_app.setToolTip(version)\n self.titulo = titulo\n self.version = version\n self.cd.lbl_name_app.setText(self.titulo)\n self.cd.image_app.setToolTip(\"
    {}
    \".format(descripcion))\n self.cd.image_app.setWordWrap(True)\n self.setMinimumSize(QSize(tamanio+30, int((tamanio+115)*0.72222)))\n self.setMaximumSize(QSize(tamanio+30, int((tamanio+115)*0.72222)))\n self.cd.image_app.setMinimumSize(QSize(tamanio, int(tamanio*0.72222)))\n\n self.texto_version()\n\n global instaladas\n if self.titulo not in instaladas:\n estado = 1\n if self.titulo in selected_apps:\n estado = 0\n else:\n estado = 2\n \n if self.titulo not in selected_apps and self.titulo not in instaladas:\n self.installEventFilter(self)\n\n self.change_color_buton(estado)\n # Consultamos si existe el grafico de la app\n ruta = abspath(join(dirname(__file__), 'resources/apps', self.titulo + '.svg'))\n if not os.path.exists(ruta):\n url = abspath(join(dirname(__file__), 'resources/apps', 'no-img.svg'))\n else:\n url = ruta\n # Establecemos la imagen\n pixmap = QPixmap(url)\n self.cd.image_app.setPixmap(pixmap)\n self.cd.btn_select_app.clicked.connect(lambda: self.select_app(self.titulo))\n self.cd.image_app.clicked.connect(lambda: self.select_app(self.titulo))\n self.cd.lbl_name_app.clicked.connect(lambda: self.select_app(self.titulo))\n\n def eventFilter(self, object, event):\n if event.type() == QEvent.Enter:\n radius = 20\n elif event.type() == QEvent.Leave:\n radius = 0\n else:\n return False\n\n shadow = QGraphicsDropShadowEffect(self,\n blurRadius=radius,\n color=QColor(255,255,255),\n offset=QPointF(0, 0))\n shadow.setXOffset(0)\n shadow.setYOffset(0)\n self.setGraphicsEffect(shadow)\n return True\n \n def texto_version(self):\n if self.titulo in selected_apps:\n self.cd.btn_select_app.setText(\"Seleccionada\")\n color = \"color: rgb(0, 255, 255);\"\n elif self.titulo in instaladas:\n self.cd.btn_select_app.setText(\"Instalada\")\n color = \"color: rgb(0, 212, 0);\"\n else:\n self.cd.btn_select_app.setText(\"v: {}\".format(self.version))\n color = \"color: rgb(107,107,107);\"\n\n self.cd.btn_select_app.setStyleSheet(\"border: transparent;\\n\"\n \"background-color: transparent;\"\n + color + \n \"border-bottom-right-radius:5px; border-bottom-left-radius:5px;\"\n \"margin-bottom: 5px;\")\n\n def select_app(self, titulo):\n global selected_apps, lista_app, instaladas, lista_selected, contador_selected\n \n for elemento in lista_app:\n if titulo in elemento: \n indice = lista_app.index(elemento)\n\n # Si la app no esta instalada\n if titulo not in instaladas:\n # Si la app no esta seleccionada\n if titulo not in selected_apps:\n selected_apps.append(titulo)\n \n \n for elemento in lista_app:\n if elemento[0] == titulo: \n indice = lista_app.index(elemento)\n item = lista_app[indice]\n lista_selected[contador_selected] = item\n contador_selected += 1\n\n lista_app[indice][4] = 0\n self.change_color_buton(0)\n self.removeEventFilter(self)\n else:\n selected_apps.remove(titulo)\n \n\n count = 0\n for elemento in lista_selected:\n titulo_elemento = lista_selected[elemento]\n\n if titulo_elemento[0] == titulo: \n eliminar = elemento\n count += 1\n \n lista_selected.pop(eliminar)\n \n\n lista_app[indice][4] = 1\n self.change_color_buton(1)\n self.installEventFilter(self)\n else:\n self.change_color_buton(2)\n\n self.texto_version()\n self.parentWindow.contar_apps()\n\n def change_color_buton(self, estado: int):\n if estado == 0: # App seleccionada RGB(0,255,255)\n r, g, b=0, 255, 255\n radio = 20\n border_color = \"border-color: #00bbc8;\"\n elif estado == 1: # App no seleccionada RGB(45,45,45)\n r, g, b=45, 45, 45\n radio = 0\n border_color = \"border-color: transparent;\"\n else: # 
app instalada RGB(0,212,0)\n r, g, b=0, 212, 0\n radio = 20\n border_color = \"border-color: #009800;\"\n self.cd.btn_select_app.setEnabled(False)\n\n self.setStyleSheet(\"#Frame{\"\n \"background-color: #2d2d2d;\"\n \"border-radius: 10px;\"\n \"margin: 10px;\"\n + border_color +\n \"border-width: 1px;\"\n \"border-style: solid;\"\n \"}\"\n \"QToolTip {\"\n \"border: 2px solid transparent;\"\n \"border-radius: 4px;\"\n \"font-size: 12px;\"\n \"background-color: rgb: 63, 63, 63;\"\n \"})\")\n\n shadow = QGraphicsDropShadowEffect(self,\n blurRadius=radio,\n color=QColor(r, g, b),\n offset=QPointF(0, 0)\n )\n shadow.setXOffset(0)\n shadow.setYOffset(0)\n self.setGraphicsEffect(shadow)\n\n\n \n \n# /Card para la aplicacion #\n################################################\n\ndef ejecutar():\n app = QApplication(sys.argv)\n global width, height, maximized\n maximized = False\n screen_rect = app.desktop().screenGeometry()\n width, height = screen_rect.width(), screen_rect.height()\n win = Ventana()\n win.calcular_anchos()\n os.system('xprop -f _KDE_NET_WM_BLUR_BEHIND_REGION 32c -set _KDE_NET_WM_BLUR_BEHIND_REGION 0 -id {}'.format(int(win.winId())))\n win.show()\n sys.exit(app.exec_())\n","repo_name":"s384/store_deepines_archived","sub_path":"deepinesStore/tienda.py","file_name":"tienda.py","file_ext":"py","file_size_in_byte":29930,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"70788911946","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def pruneTree(self, root: TreeNode) -> TreeNode:\n\n def contains_one(node: TreeNode) -> None:\n\n # End of the line, do not modify tree\n if node is None: return False\n\n # Call function on subtrees\n left = contains_one(node.left)\n right = contains_one(node.right)\n\n # If the subtrees do not contain a one, prune them\n if not left: node.left = None\n if not right: node.right = None\n\n # return 1 if val = 1 or any subtree val = 1\n return node.val or left or right\n\n # prune\n contains_one(root)\n\n # No ones in tree\n if (not root.left) and (not root.right) and (root.val == 0): root = None\n\n return root\n\n","repo_name":"Drblessing/leetcode","sub_path":"Binary Tree Pruning.py","file_name":"Binary Tree Pruning.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15681830710","text":"class Solution:\n def numMatchingSubseq(self, s: str, words: List[str]) -> int:\n @lru_cache(None)\n def exists(char, index):\n return s.find(char, index)\n\n def isSubsequence(word):\n index = -1\n for ch in word:\n index = exists(ch, index + 1)\n if index == -1:\n return False\n return True\n\n count = 0\n for word in words:\n if isSubsequence(word):\n count+= 1\n return count","repo_name":"Tek58/Leetcode","sub_path":"792-number-of-matching-subsequences/792-number-of-matching-subsequences.py","file_name":"792-number-of-matching-subsequences.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5369925353","text":"#coding=utf-8\n#!python3\n# parse the json file\n\nimport os\nimport json\nimport jsonlines\nimport requests\nROOT = '/data/yd_data/skin-quality/raw_data/' # json文件的目录\nFILEPATH = ROOT + 'face_data_20200218.json' ## 可跳过json预处理,使用新的json文文件\n## FILEPATH = 
ROOT + 'face_data_20200218_new.json' ##FILEPATH修改为此行,并修改主函数中的路径\n\nPICPATH = ROOT + 'imgs' # 下载的图像的目录\nLABELPATH = ROOT+'labels' # 生成的label的目录\n\nimport logging\nlogging.basicConfig(filename = 'mylog.txt',level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s') \nlogging.disable(logging.DEBUG) \n\n# 通过记录中的url字段下载图像\ndef downloadPicByUrl(url,path):\n\n r = requests.request('get',url) # \n #print(r.status_code)\n with open(path,'wb') as f: \n f.write(r.content)\n f.close()\n\n# json文件预处理\ndef presub(filepath):\n f = open(filepath, 'r', encoding='utf-8')\n lines = f.read()\n f.close()\n \n print(lines[-10:])\n \n lines_json = lines.replace('jpg\\\"}{\\\"','jpg\\\"}\\n{\\\"') # 将原先无逗号分割的单行json数据替换为每行包含一条数据的新json文件方便处理\n \n newpath = filepath[:-5]+'_new'+filepath[-5:] # 新json文件重命名\n logging.debug('filepath: %s'%newpath)\n \n n = open(newpath, 'w', encoding='utf-8')\n n.write(lines_json)\n n.close()\n return newpath # 返回新json文件的目录\n \n## 处理stain,black head和pimple\ndef stainEtc(report_id,value,rectangle,TXT): \n TXT.write('%d\\t%d\\n'%(report_id,value)) # report_id为图像id,value为色斑(黑头、痘痘)个数\n for i in range(value): # 写入每个色斑(黑头、痘痘)的bounding box信息\n # logging.info('width\\t%s\\n'%type(rectangle[i]['width']))\n # logging.info('top\\t%s\\n'%type(rectangle[i]['top']))\n # logging.info('height\\t%s\\n'%type(rectangle[i]['height']))\n # logging.info('left\\t%s\\n'%type(rectangle[i]['left']))\n TXT.write('%f\\t\\t%f\\t\\t%f\\t\\t%f\\n'%(rectangle[i]['width'],rectangle[i]['top'],rectangle[i]['height'],rectangle[i]['left']))\n TXT.write('\\n')\n\n## 处理skin color和skin type\ndef skinColorType(report_id,skin_value,TXT):\n TXT.write('%d\\t%s\\n'%(report_id,skin_value))\n\n## 处理skin age\ndef skinAge(report_id,skin_age,TXT):\n TXT.write('%d\\t%d\\n'%(report_id,skin_age))\n\n## 处理rose_acne\ndef roseAcne(report_id,rose_acne,TXT):\n ret = [0]*5\n for i in range(len(rose_acne)):\n key = rose_acne[i]['type']\n val = rose_acne[i]['value']\n ret[key] = val\n #print(ret)\n TXT.write('%d\\t'%report_id)\n for i in range(5):\n TXT.write('%d '%ret[i])\n TXT.write('\\n')\n \n## 处理coarse pore\ndef coarsePore(report_id,coarse_pore,TXT):\n ret = [0]*4\n for i in range(len(coarse_pore)):\n key = coarse_pore[i]['type']\n val = coarse_pore[i]['value']\n ret[key] = val\n #print(ret)\n TXT.write('%d\\t'%report_id)\n for i in range(4):\n TXT.write('%d '%ret[i])\n TXT.write('\\n')\n\n## 处理wrinkle\ndef wrinkle(report_id,wrinkle_detail,TXT):\n ret_val = [0]*5\n ret = [[0,0],[0,0],[0,0],[0,0]]\n for item in wrinkle_detail:\n \n if(item['type']==0):\n ret_val[0] = item['value']\n else:\n ret_val[item['type']] = item['value']\n for t in item['detail']:\n ret[item['type']-1][t['detail_type']] = t['value']\n\n \n TXT.write('%d\\n'%report_id)\n for i in ret_val:\n TXT.write('%d '%i)\n TXT.write('\\n')\n for i in range(4):\n for j in ret[i]:\n TXT.write('%d '%j)\n TXT.write('\\n')\n TXT.write('\\n')\n\n## 处理black eye\ndef blackEye(report_id,black_eye_detail,TXT):\n ret = [0,0]\n ret_val = [[0,0],[0,0]]\n for item in black_eye_detail:\n\n ret[item['type']] = item['value']\n for t in item['detail']:\n # print(t)\n # print(type(t))\n ret_val[item['type']][0] = t['detail_type']\n ret_val[item['type']][1] = t['level']\n\n \n TXT.write('%d\\n'%report_id)\n for i in ret:\n TXT.write('%d '%i)\n TXT.write('\\n')\n for i in range(2):\n for j in ret_val[i]:\n TXT.write('%d '%j)\n TXT.write('\\n')\n TXT.write('\\n')\n\ndef processjson(filepath):\n with open(filepath, \"r+\", encoding=\"utf8\") as f:\n line = 0\n \n ## 创建10个txt文件用于存储labels\n 
stainTXT = open(os.path.join(LABELPATH,'stain.txt'),'w',encoding=\"utf8\")\n blackHeadTXT = open(os.path.join(LABELPATH,'black_head.txt'),'w',encoding=\"utf8\")\n pimpleTXT = open(os.path.join(LABELPATH,'pimple.txt'),'w',encoding=\"utf8\")\n skinTypeTXT = open(os.path.join(LABELPATH,'skin_type.txt'),'w',encoding=\"utf8\")\n skinColorTXT = open(os.path.join(LABELPATH,'skin_color.txt'),'w',encoding=\"utf8\")\n skinAgeTXT = open(os.path.join(LABELPATH,'skin_age.txt'),'w',encoding=\"utf8\")\n roseAcneTXT = open(os.path.join(LABELPATH,'rose_acne.txt'),'w',encoding=\"utf8\")\n coarsePoreTXT = open(os.path.join(LABELPATH,'coarse_pore.txt'),'w',encoding=\"utf8\")\n wrinkleTXT = open(os.path.join(LABELPATH,'wrinkle.txt'),'w',encoding=\"utf8\")\n blackEyeTXT = open(os.path.join(LABELPATH,'black_eye.txt'),'w',encoding=\"utf8\")\n \n for item in jsonlines.Reader(f): # 逐行读取处理后的json文件,每一行包含了一张图像的所有标签信息\n \n ## test\n # print(item['report_id'])\n # continue\n \n ## fixed\n photo_url = item['photo_url'] # 获取图像的url用于下载,\n report_id = item['report_id'] # 获取图像的report_id,为int类型\n \n ## \n ## log\n line = line + 1\n logging.info('%d\\t%d'%(line,report_id))\n \n print('%d\\t%d'%(line,report_id)) # 输入处理的图像的序号的reort_id\n\n ## 读取10个子任务的具体标签信息\n stain_detail = item['stain_detail'] \n black_head_detail = item['black_head_detail']\n pimple_detail = item['pimple_detail']\n\n skin_type = item['skin_type']\n skin_color = item['skin_color']\n skin_age = item['skin_age']\n\n coarse_pore_detail = item['coarse_pore_detail']\n rose_acne_detail = item['rose_acne_detail']\n\n wrinkle_detail = item['wrinkle_detail']\n black_eye_detail = item['black_eye_detail']\n \n \n \n ## 下载图像,后续只用于生成labelse时可注释掉,无需重复下载\n picpath = os.path.join(PICPATH,'%d.jpg'%report_id)\n logging.debug('photo_url: %s'%photo_url)\n logging.debug('report_id: %d'%report_id)\n logging.debug('%s'%picpath)\n downloadPicByUrl(photo_url,picpath) # download pictures named by report_id\n \n ## stain\n stain_detail = json.loads(stain_detail) ## stain_detail原本为str格式,转化为字典\n if(not stain_detail): ## 若为空字典,则色斑个数为0,bounding box为空列表\n value = 0\n rectangle = []\n else:\n rectangle = stain_detail['rectangle'] ## type list \n value = stain_detail['value'] ## type int\n stainEtc(report_id,value,rectangle,stainTXT)\n \n ## black_head 与stain一致\n black_head_detail = json.loads(black_head_detail)\n if(not black_head_detail):\n value = 0\n rectangle = []\n else:\n rectangle = black_head_detail['rectangle'] ## type list \n value = black_head_detail['value'] ## type int\n stainEtc(report_id,value,rectangle,blackHeadTXT)\n \n ## pimple 与stain一致\n pimple_detail = json.loads(pimple_detail)\n \n if(not pimple_detail):\n value = 0\n rectangle = []\n else:\n rectangle = pimple_detail['rectangle'] ## type list \n value = pimple_detail['value'] ## type int\n stainEtc(report_id,value,rectangle,pimpleTXT)\n \n logging.info('stain black head and pimple processed')\n \n ## skin_type\n skinColorType(report_id,skin_type,skinTypeTXT)\n ## skin color\n skinColorType(report_id,skin_color,skinColorTXT)\n ## skin_age\n skinAge(report_id,skin_age,skinAgeTXT)\n \n logging.info('skin type color and age processed')\n \n ## rose_acne\n rose_acne_detail = json.loads(rose_acne_detail)\n roseAcne(report_id,rose_acne_detail,roseAcneTXT)\n \n ## coarse_pore\n coarse_pore_detail = json.loads(coarse_pore_detail)\n coarsePore(report_id,coarse_pore_detail,coarsePoreTXT)\n \n logging.info('coarse pore and rose acne processed')\n \n ## wrinkle\n wrinkle_detail = json.loads(wrinkle_detail)\n 
wrinkle(report_id,wrinkle_detail,wrinkleTXT)\n \n ## black_eye\n\n # print(black_eye_detail)\n black_eye_detail = json.loads(black_eye_detail)\n blackEye(report_id,black_eye_detail,blackEyeTXT)\n \n logging.info('wrinkle and black eye processed')\n \n stainTXT.close()\n blackHeadTXT.close()\n pimpleTXT.close()\n skinTypeTXT.close()\n skinColorTXT.close()\n skinAgeTXT.close()\n roseAcneTXT.close()\n coarsePoreTXT.close()\n wrinkleTXT.close()\n blackEyeTXT.close() \n\n\nif __name__ == '__main__':\n NEWPATH = presub(FILEPATH)\n processjson(NEWPATH)\n \n \n \n","repo_name":"yxy17/skin-quality","sub_path":"dataset/parseJsonFile.py","file_name":"parseJsonFile.py","file_ext":"py","file_size_in_byte":10184,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"73773254344","text":"\"\"\"\r\nThis file offers functions related to image processing\r\n\r\nOpenCV-package required\r\n\r\nAuthors:\r\n Florens Helfferich F.J.Helfferich@student.tudelft.nl\r\n Bram Pronk I.B.Pronk@student.tudelft.nl\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nfrom math import sqrt\r\nfrom configparser import ConfigParser\r\n\r\n\r\ndef reduce_size(in_image, scale_percent=50):\r\n \"\"\"\r\n Reduces the resolution of the image\r\n output is another image of the same type (ndarray)\r\n \"\"\"\r\n height = int(in_image.shape[0])\r\n width = int(in_image.shape[1])\r\n new_height = int(height * scale_percent / 100)\r\n new_width = int(width * scale_percent / 100)\r\n dsize = (new_width, new_height)\r\n return cv2.resize(in_image, dsize, interpolation=cv2.INTER_CUBIC)\r\n\r\n\r\ndef lpf(in_image, size_blur=3):\r\n \"\"\"\r\n Low pass filtering the image\r\n used in position_from_image if lpf argument is 'yes'\r\n output is another image of the same type (ndarray)\r\n \"\"\"\r\n if size_blur < 3:\r\n size_blur = 3\r\n kernel = np.ones((size_blur, size_blur), np.float32) / (size_blur*size_blur)\r\n return cv2.filter2D(in_image, -1, kernel)\r\n\r\n\r\ndef orientation(line: tuple, mid_height: int) -> (float, float):\r\n \"\"\"\"\r\n Finds the orientation of a given line (x1, y1, x2, y2)\r\n used in position_from_image as one of its outputs\r\n returns the x-direction and y-direction separately as if each line is a vector in 2D\r\n \"\"\"\r\n x1 = line[0]\r\n y1 = line[1] - mid_height\r\n x2 = line[2]\r\n y2 = line[3] - mid_height\r\n abs_r = sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return (x2-x1)/abs_r, (y2-y1)/abs_r\r\n\r\n\r\ndef line_numbering(in_image, lines_array):\r\n \"\"\"\"\r\n Numbers the lines and puts them in the given 'in_image' image\r\n used in position_from_image when the argument show is 'yes'\r\n output is another image of the same type (ndarray)\r\n \"\"\"\r\n lines = lines_array\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n fontScale = 0.5\r\n lightblue = (255, 255, 0)\r\n thickness = 1\r\n for i in range(len(lines)):\r\n xi = lines[i, 2]\r\n yi = lines[i, 3]\r\n origin = (xi, yi)\r\n in_image = cv2.putText(in_image, str(i), origin, fontFace=font, fontScale=fontScale, color=lightblue, thickness=thickness, lineType=cv2.LINE_AA, bottomLeftOrigin=False)\r\n return in_image\r\n\r\n\r\ndef position_from_image(in_image, configpath: str, flip='no', filtering='no', show='no') -> (tuple, tuple):\r\n \"\"\"\r\n Version 1: provides position feedback by just returning x2 y2 of last line and its orientation (tip_pos, tip_dir)\r\n - in_image: the path to the image or the image itself from which the position is read\r\n - configpath: the path to the config file with all image 
processing settings\r\n - filtering (optional): 'yes' if a 3x3 low-pass filter improves line detection see 'lpf' function above\r\n - show (optional): 'yes' if all numbered lines and orientation of tip need to be shown in the original image\r\n \"\"\"\r\n # Read image, convert to grayscale and reduce resolution for calculation speed\r\n if type(in_image) == str:\r\n in_image = cv2.imread(in_image)\r\n in_image = cv2.cvtColor(in_image, cv2.COLOR_BGR2GRAY)\r\n in_image = reduce_size(in_image, 40)\r\n # flipping along vertical axis (left becomes right)\r\n if flip == 'yes':\r\n in_image = cv2.flip(in_image, 1)\r\n\r\n # Read required values from config.ini file\r\n config_object = ConfigParser()\r\n config_object.read(configpath)\r\n imagepos = config_object[\"IMAGEPOS\"]\r\n # for edge detection:\r\n lower_threshold = int(imagepos[\"lower_threshold\"])\r\n upper_threshold = int(imagepos[\"upper_threshold\"])\r\n # for line detection from edge mask:\r\n theta_resolution = int(imagepos[\"theta_resolution\"])\r\n min_votes = int(imagepos[\"min_votes\"])\r\n minll = int(imagepos[\"minll\"])\r\n maxlg = int(imagepos[\"maxlg\"])\r\n\r\n # lpf filtering if selected see 'lpf' function above\r\n if filtering == 'yes':\r\n in_image = lpf(in_image)\r\n\r\n # creating edge mask then performing line detection\r\n edge_mask = cv2.Canny(image=in_image, threshold1=lower_threshold, threshold2=upper_threshold)\r\n lines = cv2.HoughLinesP(edge_mask, 1, np.pi / theta_resolution, min_votes, minLineLength=minll, maxLineGap=maxlg)\r\n\r\n # sorting lines by smallest x1 coord\r\n if lines is None:\r\n return None, None\r\n\r\n sorting_ind = np.argsort(lines[:, 0, 0])\r\n sorted_lines = np.zeros((len(lines[:, 0, 0]), 4), dtype='int16')\r\n for i in range(len(lines[:, 0, 0])):\r\n row_to_append = np.array(lines[sorting_ind[i], 0, :], dtype='int16')\r\n sorted_lines[i, :] = row_to_append\r\n if len(sorted_lines[:, 0]) < 30:\r\n print(\"image_proc2: \" + str(len(sorted_lines[:, 0])) + \" lines detected -> sorted_lines = \\n\", sorted_lines)\r\n else:\r\n print(\"image_proc2: Too many lines to print (\" + str(len(sorted_lines[:, 0])) + \" lines detected)\")\r\n\r\n # position calculation returning (x2, y2) of last line, (x_orientation, y_orientation)\r\n # y orientation with respect to the mid horizontal line\r\n y_mid = int(in_image.shape[0]/2)\r\n tip_dir = orientation(sorted_lines[-1, :], y_mid)\r\n\r\n tip_pos = (sorted_lines[-1, 2], sorted_lines[-1, 3])\r\n\r\n # ---------------------------- SHOW PART --------------------------------\r\n # showing lines in gray of original image if requested by 'show' argument\r\n if show == 'yes':\r\n small_color = cv2.cvtColor(in_image, cv2.COLOR_GRAY2BGR)\r\n purple = (200, 100, 200)\r\n yellow = (0, 255, 255)\r\n # putting lines in image\r\n for line in lines:\r\n x1, y1, x2, y2 = line[0]\r\n cv2.line(small_color, (x1, y1), (x2, y2), purple, 2)\r\n # putting numbers and direction arrow in image\r\n num_image = line_numbering(small_color, sorted_lines)\r\n endpoint = (tip_pos[0]+int(40*tip_dir[0]), tip_pos[1]+int(40*tip_dir[1]))\r\n if endpoint[0] > in_image.shape[1] or endpoint[1] > in_image.shape[0]:\r\n # reduce endpoint:\r\n endpoint = (tip_pos[0]+int(10*tip_dir[0]), tip_pos[1]+int(10*tip_dir[1]))\r\n print(\"image_proc->show part: endpoint arrow = \", endpoint)\r\n num_arrow_image = cv2.arrowedLine(num_image, tip_pos, endpoint, yellow, 2)\r\n cv2.imshow('Numbered Lines', num_arrow_image)\r\n print(\"image_proc->show part: Press enter to stop showing image(s)\")\r\n if 
cv2.waitKey(1) == 13:\r\n cv2.destroyAllWindows()\r\n\r\n return tip_pos, tip_dir\r\n","repo_name":"brmprnk/brachyosaurus","sub_path":"src/image_pos/image_proc2.py","file_name":"image_proc2.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2867388762","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 11 22:57:00 2022\n\n@author: maudb\n\"\"\"\n#%%Import all relevant modules \nimport pyreadr, os, random, time\nfrom sklearn.model_selection import StratifiedShuffleSplit\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_score, StratifiedShuffleSplit\nfrom sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score, balanced_accuracy_score\nfrom scipy.stats import wilcoxon\nfrom statsmodels.stats import multitest\nfrom Functions import import_data, delete_unsuccessful, delete_incorrect_last2blocks, delete_participant\nfrom Functions import delete_pp_block, display_scores, end_scores, select_columns, select_frames, balance_train_data\nscaler = StandardScaler()\nordinal_encoder = OrdinalEncoder()\nrun_number = 100\n\n#%%Load all the data & delete unsuccesful & inaccurate trials \ndelete_below85 = True\n\nopenface_map = r\"C:\\Users\\maudb\\Documents\\Psychologie\\2e_master_psychologie\\Master_thesis\\Pilot_Master_thesis\\OpenFace_output\"\nall_data = import_data(pp_numbers = np.array([[\"1\", \"10\"],[\"11\", \"20\"], [\"21\", \"34\"]]), datafile_path = openface_map)\n\naccurate_data = delete_incorrect_last2blocks(data = all_data)\nSuccessful_data = delete_unsuccessful(data = accurate_data)\n\n#%%Reduce size of the dataset & delete participants whose accuracy was below 85 and who did not understand task\n\n#Reduce size of the dataset \nAU_cols = [col for col in all_data.columns if ('AU' in col and '_r' in col)] \nfixed_cols = ['pp_number', 'block_count', 'Frame_count', 'Trial_number', 'Affect']\nsmaller_data = select_columns(all_data = Successful_data, fix_cols = fixed_cols, cols_of_interest = AU_cols)\n\n#change pp number of pp. 317 to 17\nsmaller_data[\"pp_number\"] = np.where(smaller_data.pp_number == 317, 17, smaller_data.pp_number)\n\n#Delete pp 34: she did not understand the task\ncleaned_data = delete_participant(smaller_data, pp_to_delete = 34)\n#Delete block 1 for participant 28 and 30: their accuracy was below 85% in this block \nif delete_below85 == True: \n # delete pp. 28, block 2 and pp. 30 block 2 (accuracy < 85%)\n cleaned_data = delete_pp_block(cleaned_data, 28, 1) # block number as stored (2nd block thus deleted)\n cleaned_data = delete_pp_block(cleaned_data, 30, 1)\n\n# Convert conditions to binary conditions\nConditions_data = cleaned_data[['Affect']]\nConditions_data_encoded = ordinal_encoder.fit_transform(Conditions_data)\ncleaned_data.insert(2, \"Cond_binary\", Conditions_data_encoded, True)\n#%% Analysis for FperF\n\nmetric = 'balanced_accuracy'\n\nframe_selection, frameselection_names, n_subsets = select_frames(analysis_type = 'FperF', data = cleaned_data)\nparticipants = np.unique(cleaned_data.pp_number).astype(int)\nblocks = np.unique(cleaned_data.block_count).astype(int)\n\n#Create empty arrays to store all the obtained mean accuracies for each pp. 
in each block at each frame_selection and within each repetition\nstore_all_means = np.empty([participants.shape[0], blocks.shape[0]-1, n_subsets])\n\nfor ipp, pp in zip(participants-1, participants): \n print(\"we're at pp {}\".format(pp))\n #select the data relevant for this participant \n pp_data = cleaned_data.loc[cleaned_data[\"pp_number\"] == pp]\n \n for subset_frame, isubset in zip(frame_selection, range(n_subsets)): \n subset_data = pp_data.loc[np.isin(pp_data.Frame_count, subset_frame)]\n \n train_data = subset_data[subset_data.block_count == 2]\n # balanced_train_data = balance_train_data(unbalanced_train_data = train_data)\n # print(np.unique(balanced_train_data.Cond_binary, return_counts = True))\n # train_x, train_y = balanced_train_data[AU_cols], balanced_train_data['Cond_binary']\n \n train_x, train_y = train_data[AU_cols], train_data['Cond_binary']\n classifier = svm.SVC(kernel = 'linear', C = 1)\n classifier.fit(train_x, train_y)\n \n for iblock, block in zip(blocks[:2], blocks[:2]+1): \n if (pp == 28 and block == 2) or (pp == 30 and block == 2): \n store_all_means[ipp, iblock, :] = np.nan\n else: \n test_data = subset_data[subset_data.block_count == iblock]\n test_x, test_y = test_data[AU_cols], test_data['Cond_binary']\n # accuracy = classifier.score(test_x, test_y)\n y_hat = classifier.predict(test_x)\n balanced_accuracy = balanced_accuracy_score(test_y, y_hat)\n \n store_all_means[ipp, iblock, isubset] = balanced_accuracy\n\n#Store the results in a file!\narray_dir = os.path.join(os.getcwd(), 'Stored_results')\nif not os.path.isdir(array_dir): os.makedirs(array_dir)\nnp.save(os.path.join(array_dir, \"mean_accuracies_{}_crossblocks_run{}_{}.npy\".format('FperF', run_number, metric)), store_all_means)\n\n#%%Analysis for meanAU\nmetric = 'balanced_accuracy'\n\nframe_selection, frameselection_names, n_subsets = select_frames(analysis_type = 'meanAU', data = cleaned_data)\nparticipants = np.unique(cleaned_data.pp_number).astype(int)\nblocks = np.unique(cleaned_data.block_count).astype(int)\n\n#Create empty arrays to store all the obtained mean accuracies for each pp. 
in each block at each frame_selection and within each repetition\nstore_all_means = np.empty([participants.shape[0], blocks.shape[0]-1, n_subsets])\n\ndef takemean_1pp1block1subsetdata(start_data = None): \n all_included_trials, indices = np.unique(start_data.Trial_number, return_index = True)\n n_trials = all_included_trials.shape[0]\n final_data_template = start_data.iloc[indices, :].copy(deep=False)\n final_data_template = final_data_template.iloc[:, :6]\n final_data_template.index = np.arange(0, final_data_template.shape[0])\n meanAU_pertrial = np.array([start_data[start_data.Trial_number == trial][AU_cols].mean() for trial in all_included_trials])\n meanAU_df = pd.DataFrame(meanAU_pertrial , columns = AU_cols)\n final_data = final_data_template.join(meanAU_df)\n return final_data\n\nfor ipp, pp in zip(participants-1, participants): \n print(\"we're at pp {}\".format(pp))\n #select the data relevant for this participant \n pp_data = cleaned_data.loc[cleaned_data[\"pp_number\"] == pp]\n \n for subset_frame, isubset in zip(frame_selection, range(n_subsets)): \n subset_data = pp_data.loc[np.isin(pp_data.Frame_count, subset_frame)]\n if isubset == 0: \n train_data = subset_data[subset_data.block_count == 2]\n \n #take mean within trial for the train data \n train_data_averaged = takemean_1pp1block1subsetdata(start_data = train_data)\n\n \n # ensure train data has as many positive as negative trials\n # balanced_train_data = balance_train_data(unbalanced_train_data = train_data_averaged)\n # print(np.unique(balanced_train_data.Cond_binary, return_counts = True))\n # train_x, train_y = balanced_train_data[AU_cols], balanced_train_data['Cond_binary']\n classifier = svm.SVC(kernel = 'linear', C = 1)\n classifier.fit(train_x, train_y)\n \n for iblock, block in zip(blocks[:2], blocks[:2]+1): \n if (pp == 28 and block == 2) or (pp == 30 and block == 2): \n store_all_means[ipp, iblock, :] = np.nan\n else: \n test_data = subset_data[subset_data.block_count == iblock]\n test_data_averaged = takemean_1pp1block1subsetdata(start_data = test_data)\n test_x, test_y = test_data_averaged[AU_cols], test_data_averaged['Cond_binary']\n # accuracy = classifier.score(test_x, test_y)\n test_y_hat = classifier.predict(test_x)\n check_values, counts = np.unique(test_y_hat, return_counts = True)\n balanced_accuracy = balanced_accuracy_score(test_y, test_y_hat)\n if check_values.shape[0] == 1: \n print(\"single pred for pp {}, block {}\".format(pp, block))\n print(\"accuracy: {}\".format(balanced_accuracy))\n \n\n store_all_means[ipp, iblock, isubset] = balanced_accuracy\n\n#Store the results in a file!\narray_dir = os.path.join(os.getcwd(), 'Stored_results')\nif not os.path.isdir(array_dir): os.makedirs(array_dir)\nnp.save(os.path.join(array_dir, \"mean_accuracies_{}_crossblocks_run{}_{}.npy\".format('meanAU', run_number, metric)), store_all_means)\n\n\n \n\n\n","repo_name":"MaudBeeckmans/OpenFace-project","sub_path":"Analysis/Crossblock_classification.py","file_name":"Crossblock_classification.py","file_ext":"py","file_size_in_byte":8589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16631629264","text":"import random\n\nfrom django.shortcuts import render, get_object_or_404\n\n# Create your views here./\nfrom basketapp.models import Basket\nfrom mainapp.models import ProductCategory, Product\n\n\ndef get_hot_product():\n products = Product.objects.all()\n\n return random.sample(list(products), 1)[0]\n\n\ndef get_same_products(hot_product):\n 
same_products = Product.objects.filter(category=hot_product.category).exclude(pk=hot_product.pk)\n\n return same_products\n\n\nhot_product = get_hot_product()\nsame_product = get_same_products(hot_product)\n\n\ndef products(request, pk=None):\n title = 'geekshop - каталог'\n products_menu = ProductCategory.objects.all()\n\n if pk is not None:\n if pk == 0:\n products = Product.objects.all().order_by('name')\n category = {'category_name': 'все'}\n else:\n category = get_object_or_404(ProductCategory, pk=pk)\n products = Product.objects.filter(category__pk=pk).order_by('name')\n\n context = {\n 'title': title,\n 'products_menu': products_menu,\n 'products': products,\n 'category': category,\n 'hot_product': hot_product,\n 'same_product': same_product,\n }\n\n return render(request, 'mainapp/products.html', context=context)\n\n context = {\n 'title': title,\n 'products_menu': products_menu,\n 'hot_product': hot_product,\n 'same_product': same_product,\n }\n\n return render(request, 'mainapp/products.html', context=context)\n\n\ndef product(request, pk):\n title = 'продукты'\n\n content = {\n 'title': title,\n 'links_menu': ProductCategory.objects.all(),\n 'product': get_object_or_404(Product, pk=pk),\n }\n\n return render(request, 'mainapp/product.html', content)\n","repo_name":"AndreyBibikoff/django","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8689234888","text":"from django.conf.urls import url\r\nfrom .views import delete_ajax_tab_modal, overview_tab, add_tab, edit_tab, delete_tab, reorder_tab, \\\r\n delete_ajax_modules_modal, overview_modules, add_modules, edit_modules, delete_modules, delete_modules_page, toggle_activation_view, toggle_tab_activation_view, reorder_module, \\\r\n get_version_ajax_modal, select_version, \\\r\n delete_version, add_version_comment, get_delete_version_ajax_modal, \\\r\n overview_reversion, revert_module_item\r\nfrom django.utils.translation import ugettext as _\r\n\r\nurlpatterns = [\r\n url(_('^tab/overview$').strip(),overview_tab,name=\"overviewtab\"),\r\n url(_('^tab/add$').strip(),add_tab,name=\"addtab\"),\r\n url(_('^tab/toggle-activation/(?P[0-9a-f-]+)$').strip(), toggle_tab_activation_view, name=\"activate-tabs\"),\r\n url(_('^tab/edit/(?P[0-9a-f-]+)$').strip(),edit_tab,name=\"edittab\"),\r\n url(_('^tab/delete/(?P[0-9a-f-]+)$').strip(),delete_tab,name=\"deletetab\"),\r\n url(_('^tab/reorder').strip(), reorder_tab, name=\"tab-reorder\"),\r\n url(_('^tab/delete/modal$').strip(),delete_ajax_tab_modal,name=\"deletemodaltab\"),\r\n\r\n url(_('^overview$').strip(),overview_modules,name=\"overviewmodules\"),\r\n url(_('^add$').strip(),add_modules,name=\"addmodules\"),\r\n url(_('^edit/(?P[0-9a-f-]+)$').strip(),edit_modules,name=\"editmodules\"),\r\n url(_('^toggle-activation/(?P[0-9a-f-]+)$').strip(), toggle_activation_view, name=\"activate-modules\"),\r\n url(_('^delete/(?P[0-9a-f-]+)$').strip(),delete_modules,name=\"deletemodules\"),\r\n url(_('^delete/modulepages/(?P[0-9a-f-]+)$').strip(),delete_modules_page,name=\"deletepagemodules\"),\r\n url(_('^reorder').strip(), reorder_module, name=\"module-reorder\"),\r\n url(_('^delete/modal$').strip(),delete_ajax_modules_modal,name=\"deletemodalmodules\"),\r\n\r\n url(_('^overview/reversion/(?P[a-z]+)/$').strip(),overview_reversion,name=\"overviewreversionmodule\"),\r\n 
url(_('^revert/(?P[a-z]+)/(?P[0-9a-f-]+)/$').strip(),revert_module_item,name=\"revertmodule\"),\r\n\r\n url(_('^version/modal/(?P[a-z]+)/$').strip(),get_version_ajax_modal,name=\"moduleversionmodal\"),\r\n url(_('^version/modal/delete/(?P[a-z]+)/$').strip(),get_delete_version_ajax_modal,name=\"moduledeleteversionmodal\"),\r\n url(_('^version/(?P[a-z]+)/(?P[0-9a-f-]+)$').strip(),select_version,name=\"moduleselectversion\"),\r\n url(_('^version/delete/(?P[a-z]+)/(?P[0-9a-f-]+)$').strip(),delete_version,name=\"moduledeleteversion\"),\r\n url(_('^version/comment/(?P[a-z]+)/(?P[0-9a-f-]+)$').strip(),add_version_comment,name=\"moduleaddversioncomment\"),\r\n]","repo_name":"KCuppens/GenesisV2","sub_path":"apps/modules/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18728696778","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom DecisionTreeClassifier import DecisionTreeClassifier\nfrom BaggingClassifier import BaggingClassifier\nfrom sklearn.ensemble import RandomForestClassifier as sklearn_RandomForestClassifier\nfrom sklearn import datasets\n\nclass RandomForestClassifier(BaggingClassifier):\n def __init__(self, max_depth=None, n_estimators=10, max_features='auto', \n criterion='entropy', random_seed=None):\n self.max_depth = max_depth\n self.n_estimators = n_estimators\n self.decision_trees = []\n self.max_features = max_features\n self.random_seed = random_seed\n self.criterion = criterion\n \n def fit(self, X, y, sample_weight=None):\n boosting_list = [RandomForestClassifier.Bootstrap(X, y) for i in range(self.n_estimators)]\n \n for b_x, b_y in boosting_list:\n dt = DecisionTreeClassifier(max_depth=self.max_depth, max_features=self.max_features,\n random_seed=self.random_seed, criterion=self.criterion)\n dt.fit(b_x, b_y, sample_weight=sample_weight)\n self.decision_trees.append(dt)\n\ndef main():\n iris = datasets.load_iris()\n n_samples, n_features = iris.data.shape\n X, y = iris.data, iris.target\n \n rf = RandomForestClassifier(max_features='log2', criterion='gini')\n rf.fit(X, y)\n \n pre = rf.predict(X)\n print('acc:', np.mean(pre == y))\n \n sk_rf = sklearn_RandomForestClassifier()\n sk_rf.fit(X, y)\n \n pre = sk_rf.predict(X)\n print('sklearn random forest acc:', np.mean(pre == y))\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"Fukeng/Machine-learning","sub_path":"DecisionTree/RandomForestClassifier.py","file_name":"RandomForestClassifier.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"15325327700","text":"test_case = int(input())\nfor _ in range(test_case):\n data = input()\n even = ''\n odd = ''\n for index, value in enumerate(data):\n if index % 2 == 0:\n even += data[index]\n else:\n odd += data[index]\n print(f\"{even} {odd}\")\n","repo_name":"ShakilAhmmed/Problem_Solve","sub_path":"30_Days/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"19353314871","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom model import simulate, load_models\n\n\"\"\"\nDependencies:\nnumpy \nmatplotlib\nnumba (optional, speeds simulation up: pre-compiles functions to machine code)\n\"\"\"\n\n\ndef main():\n # tiny example program:\n\n 
example_cell_idx = 20\n\n # load model parameter:\n parameters = load_models(\"models.csv\")\n\n for example_cell_idx in range(len(parameters)):\n\n model_params = parameters[example_cell_idx]\n cell = model_params.pop('cell')\n EODf = model_params.pop('EODf')\n print(\"Example with cell:\", cell)\n\n # generate EOD-like stimulus with an amplitude step:\n deltat = model_params[\"deltat\"]\n stimulus_length = 2.0 # in seconds\n time = np.arange(0, stimulus_length, deltat)\n # baseline EOD with amplitude 1:\n stimulus = np.sin(2*np.pi*EODf*time)\n # amplitude step with given contrast:\n t0 = 0.5\n t1 = 1.5\n contrast = 0.3\n stimulus[int(t0//deltat):int(t1//deltat)] *= (1.0+contrast)\n\n # integrate the model:\n spikes = simulate(stimulus, **model_params)\n\n # some analysis and plotting:\n rate = instantaneous_rate(spikes, time)\n\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=\"col\")\n\n ax1.plot(time, stimulus)\n ax1.set_title(\"Stimulus\")\n ax1.set_ylabel(\"Amplitude in mV\")\n\n ax2.plot(time, rate)\n ax2.set_title(\"Model Frequency\")\n ax2.set_ylabel(\"Frequency in Hz\")\n ax2.set_xlabel(\"Time in s\")\n plt.show()\n plt.close()\n\n\n \ndef instantaneous_rate(spikes, time):\n \"\"\"Firing rate as the inverse of the interspike intervals.\n\n Parameter\n ---------\n spikes: ndarrays of floats\n Spike times of a single trial.\n time: ndarray of floats\n Times on which instantaneous rate is computed.\n\n Returns\n -------\n rate: ndarray of floats\n Instantaneous firing rate corresponding to `spikes`.\n \"\"\"\n isis = np.diff(spikes) # well, the ISIs\n inst_rate = 1 / isis # rate as inverse ISIs\n # indices (int!) of spike times in time array:\n dt = time[1] - time[0]\n spike_indices = np.asarray(np.round((spikes-time[0])/dt), int)\n spike_indices = spike_indices[(spike_indices >= 0) &\n (spike_indices < len(time))]\n rate = np.zeros(len(time))\n for i in range(len(spike_indices)-1): # for all spikes and ISIs, except the last\n # set the full ISI to the instantaneous rate of that ISI:\n rate[spike_indices[i]:spike_indices[i+1]] = inst_rate[i]\n return rate\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bendalab/punitmodel","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16560946914","text":"from django.http.response import Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.contrib.auth.models import User\n\nfrom article.models import Article, Review, Bookmark \n\n# EDITING AND DELETING ARTICLE IMPORT - CLASS BASED VIEWS\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\n\n# Create your views here.\ndef index(request):\n articles = Article.objects.filter(featured=True).order_by(\"-createdAt\")\n bookmarked = []\n\n if request.user.is_authenticated:\n for article in articles:\n bookmarkObject = Bookmark.objects.filter(article=article, user = request.user)\n if bookmarkObject:\n bookmarked.append(article.id)\n \n context = {\n 'articles': articles,\n 'bookmarked': bookmarked\n }\n\n return render(request, \"article/index.html\", context)\n\n\ndef articles(request):\n\n articles = 
Article.objects.all().order_by(\"-createdAt\")\n\n bookmarked = []\n\n # Pagination\n paginator = Paginator(articles, 3)\n\n page_number = request.GET.get(\"page\")\n\n articles = paginator.get_page(page_number)\n\n totalNum = articles.paginator.num_pages\n\n if request.user.is_authenticated:\n for article in articles:\n bookmarkObject = Bookmark.objects.filter(article=article, user = request.user)\n if bookmarkObject:\n bookmarked.append(article.id)\n\n context = {\n \"articles\": articles,\n \"range\": range(1, totalNum+1),\n 'bookmarked': bookmarked\n }\n\n\n return render(request, \"article/articles.html\", context)\n\n\ndef articleDetail(request, article_id):\n article = get_object_or_404(Article, pk=article_id)\n\n bookmarked = False\n\n if request.user.is_authenticated:\n bookmarkObject = Bookmark.objects.filter(article=article, user = request.user)\n if bookmarkObject:\n bookmarked = True\n\n reviews = Review.objects.filter(article = article).order_by(\"-createdAt\")\n\n context = {\n \"article\": article,\n \"reviews\": reviews,\n 'bookmarked': bookmarked\n }\n\n # To get rid of error of review passed in below function\n if 'error' in request.session:\n if request.session['count'] == 1:\n del request.session['error']\n del request.session['count']\n\n request.session['count'] = 1\n\n return render(request, \"article/articleDetail.html\", context)\n\ndef articleSearch(request):\n searchText = request.GET['searchText']\n \n articles = Article.objects.filter(title__contains=searchText)\n\n users = User.objects.filter(username__contains=searchText)\n\n bookmarked = []\n\n for user in users:\n userArticles = Article.objects.filter(user=user)\n articles = articles.union(userArticles)\n\n # Pagination\n paginator = Paginator(articles, 3)\n\n page_number = request.GET.get(\"page\")\n\n articles = paginator.get_page(page_number)\n\n totalNum = articles.paginator.num_pages\n\n if request.user.is_authenticated:\n for article in articles:\n bookmarkObject = Bookmark.objects.filter(article=article, user = request.user)\n if bookmarkObject:\n bookmarked.append(article.id)\n\n context = {\n \"articles\": articles,\n \"range\": range(1, totalNum+1),\n 'bookmarked': bookmarked\n }\n\n return render(request, \"article/articles.html\", context)\n\n\n\n\n\n# @login_required \n# def createArticle(request):\n# if request.method == \"GET\":\n# return render(request, \"article/create-article.html\")\n\n# if request.method == \"POST\":\n# title = request.POST['title']\n# body = request.POST['body']\n# user = request.user\n# cover_image = request.FILES['cover_image']\n\n# Article.objects.create(title=title, body=body, cover_image=cover_image, user=user)\n\n# return redirect(\"/article/create\")\n\n\nclass ArticleCreateView(LoginRequiredMixin, CreateView):\n model = Article\n fields = ['title', 'cover_image', 'body']\n template_name = 'article/create-article.html'\n \n # To pass in fields that are not set like user\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse(\"article.articles\") \n\n# EDITING AND DELETING ARTICLE\nclass ArticleUpdateView(LoginRequiredMixin, UpdateView):\n\n model = Article\n fields = ['title', 'cover_image', 'body']\n template_name = 'article/update-article.html'\n\n def get_queryset(self):\n queryset = super(ArticleUpdateView, self).get_queryset()\n queryset = queryset.filter(user=self.request.user)\n return queryset\n\n def form_valid(self, form):\n return 
super().form_valid(form)\n\n # Where to go after the Article is created\n def get_success_url(self):\n return reverse(\"user.user_profile\", kwargs={'username': self.request.user.username}) \n\n\nclass ArticleDeleteView(LoginRequiredMixin, DeleteView):\n model = Article\n template_name = 'article/delete-article.html'\n\n def get_queryset(self):\n queryset = super(ArticleDeleteView, self).get_queryset()\n queryset = queryset.filter(user=self.request.user)\n return queryset\n\n def get_success_url(self):\n return reverse(\"user.user_profile\", kwargs={'username': self.request.user.username}) \n\n\n\n@login_required\ndef submitReview(request):\n if request.method == \"POST\":\n title = request.POST['reviewTitle']\n body = request.POST['reviewBody']\n\n articleID = request.POST['articleID']\n article = Article.objects.get(id=articleID)\n\n reviewsOfUser = Review.objects.filter(user=request.user, article=article)\n reviewCount = reviewsOfUser.count()\n\n if reviewCount == 0:\n Review.objects.create(title=title, body=body, user= request.user, article=article)\n else:\n request.session['error'] = \"You can only provide one review\"\n request.session['count'] = 0\n \n return redirect(request.META['HTTP_REFERER'] + '#reviews') # request.META['HTTP_REFERER'] -> redirects to same page\n\n@login_required\ndef reviewUpdate(request, pk):\n if request.method == \"POST\":\n\n pk = request.POST['id']\n title = request.POST['title']\n body = request.POST['body']\n\n review = Review.objects.get(pk=pk)\n\n review.title = title\n review.body = body\n\n review.save()\n\n return HttpResponseRedirect(reverse(\"user.user_profile\", kwargs={'username': request.user.username} ))\n\n else: # GET REQUEST\n review = Review.objects.get(pk=pk)\n\n if not request.user == review.user:\n raise Http404\n\n context = {\n 'review': review\n }\n return render(request,'article/review-update.html', context)\n\n\nclass ReviewDeleteView(LoginRequiredMixin, DeleteView):\n model = Review\n template_name = 'article/review-delete.html'\n\n def get_success_url(self):\n return reverse(\"user.user_profile\", kwargs={'username': self.request.user.username}) \n\n@login_required\ndef bookmark(request,pk):\n user = request.user\n article = Article.objects.get(pk=pk)\n\n bookmarkObject = Bookmark.objects.filter(article=article, user=user)\n\n if bookmarkObject:\n bookmarked = bookmarkObject[0]\n bookmarked.delete()\n else:\n Bookmark.objects.create(user=user, article=article)\n\n htmlID = \"#\"+ str(article.id)\n\n return redirect(request.META['HTTP_REFERER'] + htmlID)","repo_name":"Jashann/django-articles-app","sub_path":"article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"13904730451","text":"import sys\nimport math\nimport copy\nimport json\nimport time\nimport random\n\nfrom itertools import chain\nfrom queue import Queue\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport bpy\nimport gpu\nimport bmesh\nfrom bmesh.types import BMesh, BMVert, BMEdge, BMFace\nfrom mathutils import Matrix, Vector\nfrom mathutils.bvhtree import BVHTree\nfrom mathutils.kdtree import KDTree\nfrom mathutils.geometry import normal as compute_normal, intersect_point_tri\n\nfrom ...addon_common.common import gpustate\nfrom ...addon_common.common import bmesh_render as bmegl\nfrom ...addon_common.common.blender import tag_redraw_all\nfrom ...addon_common.common.bmesh_render import triangulateFace, 
BufferedRender_Batch\nfrom ...addon_common.common.debug import dprint, Debugger\nfrom ...addon_common.common.decorators import stats_wrapper\nfrom ...addon_common.common.globals import Globals\nfrom ...addon_common.common.hasher import hash_object, hash_bmesh\nfrom ...addon_common.common.profiler import profiler\nfrom ...addon_common.common.maths import (\n Point, Direction, Normal, Frame,\n Point2D, Vec2D, Direction2D,\n Ray, XForm, BBox, Plane,\n)\nfrom ...addon_common.common.utils import min_index\n\nfrom ...config.options import options\n\nfrom .rfmesh_wrapper import (\n BMElemWrapper, RFVert, RFEdge, RFFace, RFEdgeSequence,\n)\n\n\n\nclass RFMeshRender():\n '''\n RFMeshRender handles rendering RFMeshes.\n '''\n\n cache = {}\n\n create_count = 0\n delete_count = 0\n\n @staticmethod\n @profiler.function\n def new(rfmesh, opts, always_dirty=False):\n # TODO: REIMPLEMENT CACHING!!\n # HAD TO DISABLE THIS BECAUSE 2.83 AND 2.90 WOULD CRASH\n # WHEN RESTARTING RF. PROBABLY DUE TO HOLDING REFS TO\n # OLD DATA (CRASH DUE TO FREEING INVALID DATA??)\n\n if False:\n with profiler.code('hashing object'):\n ho = hash_object(rfmesh.obj)\n with profiler.code('hashing bmesh'):\n hb = hash_bmesh(rfmesh.bme)\n h = (ho, hb)\n if h not in RFMeshRender.cache:\n RFMeshRender.creating = True\n RFMeshRender.cache[h] = RFMeshRender(rfmesh, opts)\n del RFMeshRender.creating\n rfmrender = RFMeshRender.cache[h]\n else:\n RFMeshRender.creating = True\n rfmrender = RFMeshRender(rfmesh, opts)\n del RFMeshRender.creating\n\n rfmrender.always_dirty = always_dirty\n return rfmrender\n\n @profiler.function\n def __init__(self, rfmesh, opts):\n assert hasattr(RFMeshRender, 'creating'), (\n 'Do not create new RFMeshRender directly!'\n 'Use RFMeshRender.new()')\n\n RFMeshRender.create_count += 1\n # print('RFMeshRender.__init__', RFMeshRender.create_count, RFMeshRender.delete_count)\n\n # initially loading asynchronously?\n self.async_load = options['async mesh loading']\n self._is_loading = False\n self._is_loaded = False\n\n self.load_verts = opts.get('load verts', True)\n self.load_edges = opts.get('load edges', True)\n self.load_faces = opts.get('load faces', True)\n\n self.buf_data_queue = Queue()\n self.buf_matrix_model = rfmesh.xform.to_gpubuffer_Model()\n self.buf_matrix_inverse = rfmesh.xform.to_gpubuffer_Inverse()\n self.buf_matrix_normal = rfmesh.xform.to_gpubuffer_Normal()\n self.buffered_renders_static = []\n self.buffered_renders_dynamic = []\n self.split = None\n self.drawing = Globals.drawing\n\n self.opts = {}\n self.replace_rfmesh(rfmesh)\n self.replace_opts(opts)\n\n def __del__(self):\n RFMeshRender.delete_count += 1\n # print('RFMeshRender.__del__', self.rfmesh, RFMeshRender.create_count, RFMeshRender.delete_count)\n self.bmesh.free()\n if hasattr(self, 'buf_matrix_model'): del self.buf_matrix_model\n if hasattr(self, 'buf_matrix_inverse'): del self.buf_matrix_inverse\n if hasattr(self, 'buf_matrix_normal'): del self.buf_matrix_normal\n if hasattr(self, 'buffered_renders_static'): del self.buffered_renders_static\n if hasattr(self, 'buffered_renders_dynamic'): del self.buffered_renders_dynamic\n if hasattr(self, 'bmesh'): del self.bmesh\n if hasattr(self, 'rfmesh'): del self.rfmesh\n\n @profiler.function\n def replace_opts(self, opts):\n opts = dict(opts)\n opts['dpi mult'] = self.drawing.get_dpi_mult()\n if opts == self.opts: return\n self.opts = opts\n self.rfmesh_version = None\n\n @profiler.function\n def replace_rfmesh(self, rfmesh):\n self.rfmesh = rfmesh\n self.bmesh = rfmesh.bme\n 
self.rfmesh_version = None\n\n def dirty(self):\n self.rfmesh_version = None\n\n @profiler.function\n def add_buffered_render(self, draw_type, data, static):\n batch = BufferedRender_Batch(draw_type)\n batch.buffer(data['vco'], data['vno'], data['sel'], data['warn'], data['pin'], data['seam'])\n if static: self.buffered_renders_static.append(batch)\n else: self.buffered_renders_dynamic.append(batch)\n\n def split_visualization(self, verts=None, edges=None, faces=None):\n if not verts and not edges and not faces:\n self.split = None\n else:\n unwrap = BMElemWrapper._unwrap\n verts = { unwrap(v) for v in verts } if verts else set()\n edges = { unwrap(e) for e in edges } if edges else set()\n faces = { unwrap(f) for f in faces } if faces else set()\n edges.update(e for v in verts for e in v.link_edges)\n faces.update(f for e in edges for f in e.link_faces)\n verts.update(v for e in edges for v in e.verts)\n verts.update(v for f in faces for v in f.verts)\n edges.update(e for f in faces for e in f.edges)\n self.split = {\n 'gathered static': False,\n 'static verts': { v for v in self.bmesh.verts if v not in verts },\n 'static edges': { e for e in self.bmesh.edges if e not in edges },\n 'static faces': { f for f in self.bmesh.faces if f not in faces },\n 'gathered dynamic': False,\n 'dynamic verts': verts,\n 'dynamic edges': edges,\n 'dynamic faces': faces,\n }\n self.dirty()\n\n @profiler.function\n def _gather_data(self):\n if not self.split:\n self.buffered_renders_static = []\n self.buffered_renders_dynamic = []\n else:\n if not self.split['gathered dynamic']:\n self.buffered_renders_static = []\n self.split['gathered dynamic'] = True\n self.buffered_renders_dynamic = []\n\n mirror_axes = self.rfmesh.mirror_mod.xyz if self.rfmesh.mirror_mod else []\n mirror_x = 'x' in mirror_axes\n mirror_y = 'y' in mirror_axes\n mirror_z = 'z' in mirror_axes\n\n layer_pin = self.rfmesh.layer_pin\n\n def gather(verts, edges, faces, static):\n vert_count = 100_000\n edge_count = 50_000\n face_count = 10_000\n\n '''\n IMPORTANT NOTE: DO NOT USE PROFILER INSIDE THIS FUNCTION IF LOADING ASYNCHRONOUSLY!\n '''\n def sel(g):\n return 1.0 if g.select else 0.0\n def warn_vert(g):\n if mirror_x and g.co.x <= 0.0001: return 0.0\n if mirror_y and g.co.y >= -0.0001: return 0.0\n if mirror_z and g.co.z <= 0.0001: return 0.0\n return 0.0 if g.is_manifold and not g.is_boundary else 1.0\n def warn_edge(g):\n v0,v1 = g.verts\n if mirror_x and v0.co.x <= 0.0001 and v1.co.x <= 0.0001: return 0.0\n if mirror_y and v0.co.y >= -0.0001 and v1.co.y >= -0.0001: return 0.0\n if mirror_z and v0.co.z <= 0.0001 and v1.co.z <= 0.0001: return 0.0\n return 0.0 if g.is_manifold else 1.0\n def warn_face(g):\n return 1.0\n\n def pin_vert(g):\n if not layer_pin: return 0.0\n return 1.0 if g[layer_pin] else 0.0\n def pin_edge(g):\n return 1.0 if all(pin_vert(v) for v in g.verts) else 0.0\n def pin_face(g):\n return 1.0 if all(pin_vert(v) for v in g.verts) else 0.0\n\n def seam_vert(g):\n return 1.0 if any(e.seam for e in g.link_edges) else 0.0\n def seam_edge(g):\n return 1.0 if g.seam else 0.0\n def seam_face(g):\n return 0.0\n\n try:\n time_start = time.time()\n\n # NOTE: duplicating data rather than using indexing, otherwise\n # selection will bleed\n with profiler.code('gathering', enabled=not self.async_load):\n if self.load_faces:\n tri_faces = [(bmf, list(bmvs))\n for bmf in faces\n if bmf.is_valid and not bmf.hide\n for bmvs in triangulateFace(bmf.verts)\n ]\n l = len(tri_faces)\n for i0 in range(0, l, face_count):\n i1 = min(l, i0 + 
face_count)\n face_data = {\n 'vco': [ tuple(bmv.co) for bmf, verts in tri_faces[i0:i1] for bmv in verts ],\n 'vno': [ tuple(bmv.normal) for bmf, verts in tri_faces[i0:i1] for bmv in verts ],\n 'sel': [ sel(bmf) for bmf, verts in tri_faces[i0:i1] for _ in verts ],\n 'warn': [ warn_face(bmf) for bmf, verts in tri_faces[i0:i1] for _ in verts ],\n 'pin': [ pin_face(bmf) for bmf, verts in tri_faces[i0:i1] for _ in verts ],\n 'seam': [ seam_face(bmf) for bmf, verts in tri_faces[i0:i1] for _ in verts ],\n 'idx': None, # list(range(len(tri_faces)*3)),\n }\n if self.async_load:\n self.buf_data_queue.put((BufferedRender_Batch.TRIANGLES, face_data, static))\n tag_redraw_all('buffer update')\n else:\n self.add_buffered_render(BufferedRender_Batch.TRIANGLES, face_data, static)\n\n if self.load_edges:\n edges = [bme for bme in edges if bme.is_valid and not bme.hide]\n l = len(edges)\n for i0 in range(0, l, edge_count):\n i1 = min(l, i0 + edge_count)\n edge_data = {\n 'vco': [ tuple(bmv.co) for bme in edges[i0:i1] for bmv in bme.verts ],\n 'vno': [ tuple(bmv.normal) for bme in edges[i0:i1] for bmv in bme.verts ],\n 'sel': [ sel(bme) for bme in edges[i0:i1] for _ in bme.verts ],\n 'warn': [ warn_edge(bme) for bme in edges[i0:i1] for _ in bme.verts ],\n 'pin': [ pin_edge(bme) for bme in edges[i0:i1] for _ in bme.verts ],\n 'seam': [ seam_edge(bme) for bme in edges[i0:i1] for _ in bme.verts ],\n 'idx': None, # list(range(len(self.bmesh.edges)*2)),\n }\n if self.async_load:\n self.buf_data_queue.put((BufferedRender_Batch.LINES, edge_data, static))\n tag_redraw_all('buffer update')\n else:\n self.add_buffered_render(BufferedRender_Batch.LINES, edge_data, static)\n\n if self.load_verts:\n verts = [bmv for bmv in verts if bmv.is_valid and not bmv.hide]\n l = len(verts)\n for i0 in range(0, l, vert_count):\n i1 = min(l, i0 + vert_count)\n vert_data = {\n 'vco': [ tuple(bmv.co) for bmv in verts[i0:i1] ],\n 'vno': [ tuple(bmv.normal) for bmv in verts[i0:i1] ],\n 'sel': [ sel(bmv) for bmv in verts[i0:i1] ],\n 'warn': [ warn_vert(bmv) for bmv in verts[i0:i1] ],\n 'pin': [ pin_vert(bmv) for bmv in verts[i0:i1] ],\n 'seam': [ seam_vert(bmv) for bmv in verts[i0:i1] ],\n 'idx': None, # list(range(len(self.bmesh.verts))),\n }\n if self.async_load:\n self.buf_data_queue.put((BufferedRender_Batch.POINTS, vert_data, static))\n tag_redraw_all('buffer update')\n else:\n self.add_buffered_render(BufferedRender_Batch.POINTS, vert_data, static)\n\n if self.async_load:\n self.buf_data_queue.put('done')\n\n time_end = time.time()\n # print('RFMeshRender: Gather time: %0.2f' % (time_end - time_start))\n\n except Exception as e:\n print('EXCEPTION WHILE GATHERING: ' + str(e))\n raise e\n\n # self.bmesh.verts.ensure_lookup_table()\n for bmv in self.bmesh.verts:\n if bmv.link_faces:\n bmv.normal_update()\n # for bmelem in chain(self.bmesh.faces, self.bmesh.edges):\n # bmelem.normal_update()\n\n self._is_loading = True\n self._is_loaded = False\n\n # with profiler.code('Gathering data for RFMesh (%ssync)' % ('a' if self.async_load else '')):\n if not self.async_load:\n #print(f'RFMeshRender._gather: synchronous')\n #profiler.function(gather)()\n if not self.split:\n #print(f' v={len(self.bmesh.verts)} e={len(self.bmesh.edges)} f={len(self.bmesh.faces)}')\n gather(self.bmesh.verts, self.bmesh.edges, self.bmesh.faces, True)\n else:\n if not self.split['gathered static']:\n #print(f' sv={len(self.split[\"static verts\"])} se={len(self.split[\"static edges\"])} sf={len(self.split[\"static faces\"])}')\n gather(self.split['static verts'], 
self.split['static edges'], self.split['static faces'], True)\n self.split['gathered static'] = True\n #print(f' dv={len(self.split[\"dynamic verts\"])} de={len(self.split[\"dynamic edges\"])} df={len(self.split[\"dynamic faces\"])}')\n gather(self.split['dynamic verts'], self.split['dynamic edges'], self.split['dynamic faces'], False)\n else:\n #print(f'RFMeshRender._gather: asynchronous')\n #self._gather_submit = ThreadPoolExecutor.submit(gather)\n e = ThreadPoolExecutor()\n if not self.split:\n #print(f' v={len(self.bmesh.verts)} e={len(self.bmesh.edges)} f={len(self.bmesh.faces)}')\n e.submit(lambda : gather(self.bmesh.verts, self.bmesh.edges, self.bmesh.faces, True))\n else:\n if not self.split['gathered static']:\n #print(f' sv={len(self.split[\"static verts\"])} se={len(self.split[\"static edges\"])} sf={len(self.split[\"static faces\"])}')\n e.submit(lambda : gather(self.split['static verts'], self.split['static edges'], self.split['static faces'], True))\n self.split['gathered static'] = True\n #print(f' dv={len(self.split[\"dynamic verts\"])} de={len(self.split[\"dynamic edges\"])} df={len(self.split[\"dynamic faces\"])}')\n e.submit(lambda : gather(self.split['dynamic verts'], self.split['dynamic edges'], self.split['dynamic faces'], False))\n\n @profiler.function\n def clean(self):\n if not self.buf_data_queue.empty():\n tag_redraw_all('buffer update')\n while not self.buf_data_queue.empty():\n data = self.buf_data_queue.get()\n if data == 'done':\n self._is_loading = False\n self._is_loaded = True\n self.async_load = False\n else:\n self.add_buffered_render(*data)\n\n try:\n # return if rfmesh hasn't changed\n self.rfmesh.clean()\n ver = self.rfmesh.get_version() if not self.always_dirty else None\n if self.rfmesh_version == ver:\n profiler.add_note('--> is clean')\n return\n # profiler.add_note(\n # '--> versions: \"%s\",\n # \"%s\"' % (str(self.rfmesh_version),\n # str(ver))\n # )\n # make not dirty first in case bad things happen while drawing\n self.rfmesh_version = ver\n self._gather_data()\n except:\n Debugger.print_exception()\n profiler.add_note('--> exception')\n pass\n\n profiler.add_note('--> passed through')\n\n @profiler.function\n def draw(\n self,\n view_forward, unit_scaling_factor,\n buf_matrix_target, buf_matrix_target_inv,\n buf_matrix_view, buf_matrix_view_invtrans,\n buf_matrix_proj,\n alpha_above, alpha_below,\n cull_backfaces, alpha_backface,\n draw_mirrored,\n symmetry=None, symmetry_view=None,\n symmetry_effect=0.0, symmetry_frame: Frame=None\n ):\n self.clean()\n if not self.buffered_renders_static and not self.buffered_renders_dynamic: return\n\n try:\n gpustate.depth_test('LESS_EQUAL')\n gpustate.depth_mask(False) # do not overwrite the depth buffer\n\n opts = dict(self.opts)\n\n opts['matrix model'] = self.rfmesh.xform.mx_p\n opts['matrix normal'] = self.rfmesh.xform.mx_n\n opts['matrix target'] = buf_matrix_target\n opts['matrix target inverse'] = buf_matrix_target_inv\n opts['matrix view'] = buf_matrix_view\n opts['matrix view normal'] = buf_matrix_view_invtrans\n opts['matrix projection'] = buf_matrix_proj\n opts['forward direction'] = view_forward\n opts['unit scaling factor'] = unit_scaling_factor\n\n opts['symmetry'] = symmetry\n opts['symmetry frame'] = symmetry_frame\n opts['symmetry view'] = symmetry_view\n opts['symmetry effect'] = symmetry_effect\n opts['draw mirrored'] = draw_mirrored\n\n bmegl.glSetDefaultOptions()\n\n opts['no warning'] = not options['warn non-manifold']\n opts['no pinned'] = not options['show pinned']\n opts['no 
seam'] = not options['show seam']\n\n opts['cull backfaces'] = cull_backfaces\n opts['alpha backface'] = alpha_backface\n opts['dpi mult'] = self.drawing.get_dpi_mult()\n mirror_axes = self.rfmesh.mirror_mod.xyz if self.rfmesh.mirror_mod else []\n for axis in mirror_axes: opts['mirror %s' % axis] = True\n\n if not opts.get('no below', False):\n # draw geometry hidden behind\n # geometry below\n opts['depth test'] = 'GREATER'\n # opts['depth mask'] = False\n opts['poly hidden'] = 1 - alpha_below\n opts['poly mirror hidden'] = 1 - alpha_below\n opts['line hidden'] = 1 - alpha_below\n opts['line mirror hidden'] = 1 - alpha_below\n opts['point hidden'] = 1 - alpha_below\n opts['point mirror hidden'] = 1 - alpha_below\n for buffered_render in chain(self.buffered_renders_static, self.buffered_renders_dynamic):\n buffered_render.draw(opts)\n\n # geometry above\n opts['depth test'] = 'LESS_EQUAL'\n # opts['depth mask'] = False\n opts['poly hidden'] = 1 - alpha_above\n opts['poly mirror hidden'] = 1 - alpha_above\n opts['line hidden'] = 1 - alpha_above\n opts['line mirror hidden'] = 1 - alpha_above\n opts['point hidden'] = 1 - alpha_above\n opts['point mirror hidden'] = 1 - alpha_above\n for buffered_render in chain(self.buffered_renders_static, self.buffered_renders_dynamic):\n buffered_render.draw(opts)\n\n gpustate.depth_test('LESS_EQUAL')\n gpustate.depth_mask(True)\n except:\n Debugger.print_exception()\n pass\n","repo_name":"CGCookie/retopoflow","sub_path":"retopoflow/rfmesh/rfmesh_render.py","file_name":"rfmesh_render.py","file_ext":"py","file_size_in_byte":21109,"program_lang":"python","lang":"en","doc_type":"code","stars":2072,"dataset":"github-code","pt":"81"} +{"seq_id":"21896585955","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\npython mrm2csv.py filename.wiff\n\nCreated on Wed Jun 04 11:39:29 2014\n\n@author: marneyl\n\"\"\"\n\nimport sys\nimport threading\nimport time\nimport pymzml\nimport os\nimport pandas\n\ndef main():\n\tfilename = sys.argv[1]\n\tdata = getSRM(filename)\n\tdata['transitions'].to_csv(os.path.splitext(filename)[0]+'.csv')\n\t\t\nclass mzML_conv(threading.Thread):\n def __init__(self,filename):\n self.filename = filename\n threading.Thread.__init__(self)\n def run(self):\n os.system(\"msconvert \" + self.filename + \" --mzML\" + ' -o ' + filename + '.mzML')\n\t\ndef getSRM(filename):\n data = dict()\n transitions = dict()\n numtransition = list()\n masses = list()\n msrun = pymzml.run.Reader(filename)\n for spectra in msrun:\n if spectra['id'] != 'TIC':\n x = str.split(spectra['id'])\n transition = x[2].split('=')[1] + '-' + x[3].split('=')[1]\n numtransition.append(x[-1].split('=')[1])\n masses.append(transition)\n transitions[transition] = (spectra.mz, spectra.i)\n data['transitions'] = transitions\n data['numtransition'] = numtransition\n data['masses'] = masses\n return data\n\nmain()","repo_name":"marneylc/LCMS_highthroughput","sub_path":"python/mrm2csv.py","file_name":"mrm2csv.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"20991457441","text":"\"\"\"\n给定一个无序的整数数组,找到其中最长上升子序列的长度。\n\n示例:\n\n输入: [10,9,2,5,3,7,101,18]\n输出: 4\n解释: 最长的上升子序列是 [2,3,7,101],它的长度是 4。\n说明:\n\n可能会有多种最长上升子序列的组合,你只需要输出对应的长度即可。\n你算法的时间复杂度应该为 O(n2) 。\n进阶: 你能将算法的时间复杂度降低到 O(n log n) 吗?\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/longest-increasing-subsequence\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n\n\nclass Solution:\n def lengthOfLIS(self, nums) -> int:\n # 1404ms 14.8MB\n 
# DP算法\n        if not nums:\n            return 0\n\n        dp_list = []\n        for i in range(len(nums)):\n            dp_list.append(1)\n            for j in range(i):\n                if nums[i] > nums[j]:\n                    dp_list[i] = max(dp_list[i], dp_list[j] + 1)\n\n        return max(dp_list)\n\n\ntest = Solution()\nprint(test.lengthOfLIS(\n    [-2,-1]\n))","repo_name":"flashlightli/math_question","sub_path":"leetcode_question/mid_question/300_Longest_Increasing_Subsequence.py","file_name":"300_Longest_Increasing_Subsequence.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72759555786","text":"from algorithm.LanguagePredict import create_model_unigram, create_model_ngram\nfrom functions.test_data import test_data, get_feature_accuracy, test_data_ngram\nfrom utilities.utils import write_to_csv, write_accuracy_to_csv\n\n\ndef run_unigram():\n    create_model_unigram()\n    text, languages, predicted, accuracy, features = test_data(\"test_data/test_data_text.csv\")\n    write_to_csv(\"result/result_test_data.csv\", text, languages, predicted)\n    languages_accuracy = get_feature_accuracy(\"result/result_test_data.csv\", features)\n    write_accuracy_to_csv(\"result/language_accuracy.csv\", languages_accuracy)\n    print(\"Accuracy of detecting from test data is \", accuracy)\n\n\ndef run_ngram():\n    accuracy_sc, cv = create_model_ngram(1)\n    text, languages, predicted, accuracy, features = test_data_ngram(\"test_data/test_data_text.csv\", cv)\n    write_to_csv(\"result/result_test_data_ngram.csv\", text, languages, predicted)\n    languages_accuracy = get_feature_accuracy(\"result/result_test_data_ngram.csv\", features)\n    write_accuracy_to_csv(\"result/language_accuracy_ngram.csv\", languages_accuracy)\n    print(\"Accuracy of detecting from test data is \", accuracy)\n\n\ndef main():\n    run_unigram()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"georgeMarian1999/language-detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24935218026","text":"import os\r\n# 书\r\nclass Book:\r\n    def __init__(self,name,author,status=0): # 书名、作者、借出状态\r\n        self.name = name\r\n        self.author = author\r\n        self.status = status\r\n    def __eq__(self, other): # 自定义 book对象相等的条件\r\n        return self.__dict__ == other.__dict__\r\n    def __str__(self):\r\n        return f'{self.name}'\r\n# 读者\r\nclass Reader:\r\n    def __init__(self, name):\r\n        self.name = name\r\n        self.books = []\r\n    # 借书\r\n    def borrow_book(self,book,bookbase):\r\n        if book in self.books:\r\n            print('此书你已经借过了')\r\n        else:\r\n            bookbase.jie(book,self)\r\n            self.books.append(book.name)\r\n            print(f'借书{book.name} 成功')\r\n    # 还书\r\n    def return_book(self, book, bookbase):\r\n        if book.name not in self.books:\r\n            print('此书你没有借过')\r\n        else:\r\n            bookbase.huan(book,self)\r\n            self.books.remove(book.name)\r\n            print(f'还书{book.name} 成功')\r\n# 图书库\r\nclass BookBase:\r\n    def __init__(self):\r\n        self.book_list = []\r\n    # 利用文件books2.txt 初始化book_list\r\n    def init_from_file(self, file):\r\n        try:\r\n            with open(file, 'r',encoding='utf-8') as file:\r\n                lines = file.readlines()\r\n        except Exception as err:\r\n            print(err)\r\n        for ln in lines:\r\n            line = ln.strip().split(' ')\r\n            sss = tuple(line[0][1:-1].split(','))\r\n            b = Book(*sss)\r\n            names = line[3][1:-2].split(',') if line[3][1:-2] else [] # 如果为[] ,则返回空数组\r\n            dict0 = {'book_info':b, 'total_num':int(line[1]), 'surplus_num':int(line[2]), 'reader_names':names}\r\n            self.book_list.append(dict0)\r\n    def __str__(self):\r\n        string0 = ''\r\n        for b in self.book_list:\r\n            string0 += 
f'书名:{str(b[\"book_info\"])}, 总数:{b[\"total_num\"]}本, 剩余:{b[\"surplus_num\"]}本, 借阅者:{str(b[\"reader_names\"])}\\n'\n return string0\n # 新书上架\n def add_newbook(self, book, total_num):\n for b in self.book_list:\n if book.name == b['book_info'].name:\n print('此书已经在架上!')\n break\n else:\n dict0 = {'book_info':book, 'total_num':total_num, 'surplus_num':total_num, 'reader_names':[]}\n self.book_list.append(dict0)\n # 写入books2.txt文件\n p = os.path.dirname(__file__)\n with open(os.path.join(p,'books2.txt'),'w',encoding='utf-8') as file:\n for bk in self.book_list:\n file.write(f'[{bk[\"book_info\"].name},{bk[\"book_info\"].author}] {bk[\"total_num\"]} {bk[\"total_num\"]} []\\n')\n print(f'新书《{book.name}》上架成功!')\n # 旧书下架 \n def del_book(self, book):\n for b in self.book_list:\n if book.name == b['book_info'].name:\n self.book_list.remove(b)\n # 更新books2.txt文件\n p = os.path.dirname(__file__)\n with open(os.path.join(p,'books2.txt'),'w',encoding='utf-8') as file:\n for bk in self.book_list:\n file.write(f'[{bk[\"book_info\"].name},{bk[\"book_info\"].author}] {bk[\"total_num\"]} {bk[\"total_num\"]} []\\n')\n print(f'旧书《{book.name}》下架成功!')\n break\n else:\n print('此书不在架上!')\n # 借书\n def jie(self, book, reader):\n # {'book_info':b, 'total_num':line[1], 'surplus_num':line[2], 'reader_names':names}\n for b in self.book_list:\n # if book in reader.books:\n # print('此书你已经借过了!')\n # break\n if b['surplus_num'] == 0:\n print('此书已被借完,请稍后再来!')\n break\n elif b['book_info'] == book:\n b['surplus_num'] -= 1\n b['reader_names'].append(reader.name)\n break\n else:\n print('没有此书!')\n # 还书\n def huan(self, book, reader):\n for b in self.book_list:\n if book.name not in reader.books:\n print('此书你没有借过了!') # 商榷?????\n break\n elif b['total_num'] == b['surplus_num']:\n print('此书不是从我馆借出的!')\n break\n elif b['book_info'] == book:\n b['surplus_num'] += 1\n b['reader_names'].remove(reader.name)\n break\n else:\n print('没有此书!')\n\n\nbs = BookBase()\nbs.init_from_file(r'E:\\Learn\\python-base\\案例\\librarysys\\books2.txt')\nprint(bs)\n# r0 = Reader('jack')\n# b0 = Book('三国','罗贯中')\n# r0.borrow_book(b0,bs)\n# # b1 = Book('红楼梦','曹雪芹')\n# print(bs)\n# r0.return_book(b0, bs)\nb2 = Book('三国','罗贯中')\nbs.add_newbook(b2,5)\nprint(bs)\n\n\n# 主函数\ndef main():\n print('*'*30,'欢迎进入图书系统','*'*30)\n islogin = False\n user_name = ''\n while True:\n n_select = input('\\n请选择进入的功能序号:1 查看图书列表 2 ���录 3 注册用户 4 借书 5 还书 q退出\\n')\n # 展示现存图书\n if n_select == '1':\n show_book()\n continue\n # 登陆\n elif n_select == '2':\n user_name = login() # 接收返回的用户信息\n if user_name: # 如果登陆成功,则标记islogin\n islogin = True\n continue\n # 用户注册\n elif n_select == '3':\n regist()\n islogin = False\n continue\n # 借书\n elif n_select == '4':\n if not islogin: # 如果没登录则 提示登陆\n print('请登陆后再借书!')\n continue\n borrow_book(user_name)\n continue\n # 还书\n elif n_select == '5':\n if not islogin:\n print('请登陆后再还书!')\n continue\n return_book(user_name)\n continue\n elif n_select == 'q':\n break\n else:\n print('请选择正确的序号!')\n\n# if __name__ == \"__main__\":\n# main()\n","repo_name":"heting-intesim/python-base","sub_path":"案例/librarysys/lib2_obj.py","file_name":"lib2_obj.py","file_ext":"py","file_size_in_byte":6267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27250923440","text":"import tensorflow as tf\r\n\r\n\"\"\"\r\n把一个序列放进随机队列,再拿出来,这些数据就变成了无序了\r\n\"\"\"\r\nit = tf.data.Dataset.range(10).map(lambda x: tf.cast(x, tf.int32)).make_one_shot_iterator()\r\nq = tf.RandomShuffleQueue(10, min_after_dequeue=0, dtypes=[tf.int32], 
shapes=[tuple()])\r\nit_next = it.get_next()\r\nprint(it_next)\r\nenq = q.enqueue(it_next)\r\nqsize = q.size()\r\ndeq = q.dequeue()\r\nwith tf.Session() as sess:\r\n sess.run((tf.global_variables_initializer(), tf.local_variables_initializer()))\r\n for i in range(10):\r\n print(sess.run([enq, qsize]))\r\n while sess.run(qsize):\r\n print(sess.run(deq))\r\n","repo_name":"weiyinfu/learnTensorflow","sub_path":"tensorflow_basic/队列之random_shuffle_queue.py","file_name":"队列之random_shuffle_queue.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14508616625","text":"import unittest\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom simple_sr.utils.image import image_utils, metrics\n\nDUMP_RECONSTRUCTED = True\nDATA_DIR = \"./tests/data\"\nSAVE_DIR = \"./tests/data/reconstructed\"\nPATCH_DIMS = [(1, 1), (2, 2), (3, 3), (3, 1), (1, 3), (2, 3), (3, 2)]\n\n\nclass TestImageUtils(unittest.TestCase):\n\n def setUp(self):\n self.mat_3x3 = tf.constant([\n [[1, 1, 1], [2, 2, 2], [3, 3, 3]],\n [[4, 4, 4], [5, 5, 5], [6, 6, 6]],\n [[7, 7, 7], [8, 8, 8], [9, 9, 9]]\n ])\n\n self.mat_5x3 = tf.constant([\n [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4], [5, 5, 5]],\n [[6, 6, 6], [7, 7, 7], [8, 8, 8], [9, 9, 9], [10, 10, 10]],\n [[11, 11, 11], [12, 12, 12], [13, 13, 13], [14, 14, 14], [15, 15, 15]]\n ])\n self.matrices = [\n self.mat_3x3,\n self.mat_5x3\n ]\n\n if DUMP_RECONSTRUCTED:\n os.makedirs(SAVE_DIR, exist_ok=True)\n ds = tf.data.Dataset.list_files(\n [os.path.join(DATA_DIR, fname) for fname in os.listdir(DATA_DIR)\n if os.path.isfile(os.path.join(DATA_DIR, fname))]\n )\n ds = ds.map(tf.io.read_file)\n ds = ds.map(tf.image.decode_png).batch(1)\n self.images = ds\n\n def test_segmentation(self):\n for patch_dim in PATCH_DIMS:\n for matrix in self.matrices:\n patches, padding = image_utils.segment_into_patches(\n matrix, patch_width=patch_dim[0], patch_height=patch_dim[1]\n )\n self.assertEqual(4, patches.shape.rank)\n self.assertEqual(patch_dim[0], patches.shape[2])\n self.assertEqual(patch_dim[1], patches.shape[1])\n\n def test_segment_and_reconstruct(self):\n for patch_dim in PATCH_DIMS:\n for matrix in self.matrices:\n patches, padding = image_utils.segment_into_patches(\n matrix, patch_width=patch_dim[0], patch_height=patch_dim[1]\n )\n\n reconstructed = image_utils.reconstruct_from_patches(\n patches, original_height=matrix.shape[0], original_width=matrix.shape[1],\n horizontal_padding=padding[0][1], vertical_padding=padding[1][1]\n )\n self._assert_shape(matrix, reconstructed)\n\n def test_segment_with_overlap_and_reconstruct(self):\n patch_dims = ((32, 32), (64, 64), (128, 128))\n for idx, img in enumerate(self.images):\n for patch_dim in patch_dims:\n pixel_overlap = patch_dim[0] // 4\n patches, padding = image_utils.segment_into_patches(\n img, patch_width=patch_dim[0], patch_height=patch_dim[1],\n pixel_overlap=pixel_overlap\n )\n\n reconstructed = image_utils.reconstruct_from_overlapping_patches(\n patches, image_height=img.shape[1], image_width=img.shape[2],\n pixel_overlap=pixel_overlap,\n horizontal_padding=(padding[0][1] - pixel_overlap),\n vertical_padding=(padding[1][1] - pixel_overlap)\n )\n _img = img\n if _img.shape.rank == 4:\n _img = tf.reshape(_img, (_img.shape[1:]))\n self._assert_shape(_img, reconstructed)\n self._assert_content(_img, reconstructed)\n self._dump_reconstructed(reconstructed, patch_dim, idx)\n\n def test_segment_and_reconstruct_real_image(self):\n 
patch_dims = ((32, 32), (64, 64), (128, 128))\n for idx, img in enumerate(self.images):\n img = image_utils._extract_tensor(img)\n for patch_dim in patch_dims:\n patches, padding = image_utils.segment_into_patches(\n img, patch_width=patch_dim[0], patch_height=patch_dim[1]\n )\n\n reconstructed = image_utils.reconstruct_from_patches(\n patches, original_height=img.shape[0], original_width=img.shape[1],\n horizontal_padding=padding[0][1], vertical_padding=padding[1][1]\n )\n _img = img\n if _img.shape.rank == 4:\n _img = tf.reshape(_img, (_img.shape[1:]))\n self._assert_shape(_img, reconstructed)\n self._assert_content(_img, reconstructed)\n self._dump_reconstructed(reconstructed, patch_dim, idx)\n\n def _dump_reconstructed(self, reconstructed, patch_dims, idx):\n if DUMP_RECONSTRUCTED:\n recon_img = image_utils.tensor_to_img(reconstructed)\n recon_img.save(f\"{SAVE_DIR}/recon{idx}_{patch_dims[0]}x{patch_dims[1]}.png\")\n\n def _assert_content(self, original, reconstructed):\n self.assertEqual(0, tf.keras.metrics.MeanSquaredError()(original, reconstructed))\n self.assertEqual(float(\"inf\"), metrics.psnr(original, reconstructed, max_val=255))\n self.assertEqual(1.0, metrics.ssim(original, reconstructed, max_val=255))\n self.assertEqual(float(\"inf\"), metrics.psnr_on_y((original/255), (reconstructed/255)))\n\n def _assert_shape(self, original, reconstructed):\n self.assertEqual(3, reconstructed.shape.rank)\n self.assertEqual(original.shape[-3], reconstructed.shape[-3])\n self.assertEqual(original.shape[-2], reconstructed.shape[-2])\n self.assertEqual(original.shape[-1], reconstructed.shape[-1])\n _original = original\n if original.shape.rank == 4:\n _original = tf.reshape(_original, (_original.shape[1:]))\n np.testing.assert_array_equal(_original.numpy(), reconstructed.numpy())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"bw0248/SimpleSR","sub_path":"tests/utils/image/test_image_utils.py","file_name":"test_image_utils.py","file_ext":"py","file_size_in_byte":5761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19988044487","text":"from flask import Flask, render_template, request\r\nfrom collections import Counter\r\nimport string\r\nimport matplotlib.pyplot as plt\r\nfrom nltk import word_tokenize\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\n\r\napp = Flask(__name__)\r\n\r\ndef process_text(text):\r\n lower_case = text.lower()\r\n cleaned_text = lower_case.translate(str.maketrans('', '', string.punctuation))\r\n tokenized_words = word_tokenize(cleaned_text, \"english\")\r\n final_words = [word for word in tokenized_words if word not in stopwords.words('english')]\r\n \r\n return final_words\r\n\r\ndef analyze_emotions(final_words):\r\n emotion_list = []\r\n with open('emotions.txt', 'r') as file:\r\n for line in file:\r\n clear_line = line.replace('\\n', '').replace(',', '').replace(\"'\", '').strip()\r\n word, emotion = clear_line.split(':')\r\n if word in final_words:\r\n emotion_list.append(emotion)\r\n\r\n return Counter(emotion_list)\r\n\r\ndef sentiment_analyse(sentiment_text):\r\n score = SentimentIntensityAnalyzer().polarity_scores(sentiment_text)\r\n neg = score['neg']\r\n pos = score['pos']\r\n if neg > pos:\r\n return \"Negative Sentiment\"\r\n elif pos > neg:\r\n return \"Positive Sentiment\"\r\n else:\r\n return \"Neutral Sentiment\"\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\n\r\ndef index():\r\n if request.method == 'POST':\r\n 
text = request.form['text']\r\n        final_words = process_text(text)\r\n        emotion_counter = analyze_emotions(final_words)\r\n        sentiment = sentiment_analyse(text)\r\n        return render_template('result.html', emotion_counter=emotion_counter, sentiment=sentiment)\r\n    return render_template('index.html')\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)","repo_name":"restingdemon/AI-Project","sub_path":"SentimentAnalysis/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34408448450","text":"from tkinter import *\r\nfrom PIL import ImageTk, Image\r\nimport nurse_homepage\r\nimport nurse_login_page\r\nimport sql_test\r\n\r\n\r\n\r\nclass Guardians:\r\n    def __init__(self, window):\r\n        self.window=window\r\n        self.window.geometry('1166x718')\r\n        self.window.resizable(0, 0)\r\n        self.window.state('zoomed')\r\n        self.window.title('Guardians')\r\n        self.window['bg']='#FFFFFF'\r\n\r\n        # ========================================================================\r\n        # ============ guardians Image ================================================\r\n        # ========================================================================\r\n        self.nurse_homepage_image=Image.open('images\homepage.jpg')\r\n        self.resized=self.nurse_homepage_image.resize((600,400))\r\n        photo = ImageTk.PhotoImage(self.resized)\r\n        self.nurse_homepage_image_label = Label(self.window, image=photo, bg='#FFFFFF')\r\n        self.nurse_homepage_image_label.image = photo\r\n        self.nurse_homepage_image_label.place(x=700, y=100)\r\n\r\n        #=====patient id label==========\r\n        self.label = Label(self.window, text=\"Patient ID\", bg='#FFFFFF',fg=\"black\",\r\n                           font=(\"Times New Roman\", 18, \"bold\"))\r\n        self.label.place(x=300,y=550)\r\n\r\n        #=====patient id entry box==========\r\n        self.patient_id_entry = Entry(self.window, highlightthickness=3,relief=FLAT, bg=\"#ffffff\", fg=\"black\",\r\n                                      font=(\"Times New Roman \", 14, \"bold\"),width=20)\r\n        self.patient_id_entry.place(x=485,y=550)\r\n\r\n        #=====show guardians button==========\r\n        self.show_button = Button(self.window,text='show guardian info', font=(\"yu gothic ui\", 15, \"bold\"), bg=\"#90FFBD\", cursor=\"hand2\",\r\n                                   borderwidth=0,fg='#000000', activebackground=\"#FFFFFF\",width=19,height=1,command=self.show)\r\n        self.show_button.place(x=800, y=540)\r\n        \r\n        #=====back button==========\r\n        self.back_button = Button(self.window,text='Back', font=(\"yu gothic ui\", 16, \"bold\"), bg=\"#4F77AA\", cursor=\"hand2\",\r\n                                   borderwidth=0,fg='#000000', activebackground=\"#FFFFFF\",width=6,height=1,command= self.back)\r\n        self.back_button.place(x=50, y=600)\r\n\r\n        #=======log out button ===========\r\n        self.logout_button = Button(self.window,text='logout', font=(\"yu gothic ui\", 15, \"bold\",\"underline\"), bg=\"#FFFFFF\", cursor=\"hand2\",\r\n                                   borderwidth=0,fg='#000000', activebackground=\"#FFFFFF\",width=17,height=1,command=self.logout)\r\n        self.logout_button.place(x=1200, y=40)\r\n\r\n    #=====back function=========\r\n    def back(self):\r\n        win =Toplevel()\r\n        nurse_homepage.NurseHomepage(win)\r\n        self.window.withdraw()\r\n        win.deiconify()\r\n\r\n    #=====logout function=========\r\n    def logout(self):\r\n        win=Toplevel()\r\n        nurse_login_page.NurseLoginPage(win)\r\n        self.window.withdraw()\r\n        win.deiconify()\r\n\r\n    def show(self):\r\n        Guardians.patient_id =self.patient_id_entry.get()\r\n        select_query=\"\"\"select * from guardians where patient_id==?\"\"\"\r\n 
sql_test.sqlBase.cursor.execute(select_query,(Guardians.patient_id,))\r\n        result= sql_test.sqlBase.cursor.fetchall()\r\n\r\n        if(result):\r\n            print_records=''\r\n            for record in result :\r\n                print_records += str(record) + \" \"\r\n            self.entry= Label(self.window,text=print_records,width=20,height=1,fg='black',bg='#ffffff',\r\n                              font=(\"Times New Roman\", 18, \"bold\"))\r\n            self.entry.place(x=200,y=250)\r\n            \r\n        else:\r\n            self.no_entries_box= Label(self.window,text=\"There are no guardians\",\r\n                                       bg=\"#ffffff\",fg=\"black\",font=(\"Times New Roman \",18, \"bold\"))\r\n\r\n            self.no_entries_box.place(x=200,y=250)\r\n\r\n        self.patient_id_entry.delete(0,END)\r\n\r\ndef guardians():\r\n    window = Tk()\r\n    Guardians(window)\r\n    window.mainloop()\r\n\r\n\r\nif __name__ == '__main__':\r\n    guardians()","repo_name":"tasneemyousry/hospital-system","sub_path":"guardians.py","file_name":"guardians.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17691839058","text":"import os\nimport zipfile\n\nimport bokeh.embed\nimport pandas as pd\n\nfrom pydatalab.blocks.base import DataBlock\nfrom pydatalab.bokeh_plots import mytheme, selectable_axes_plot\nfrom pydatalab.file_utils import get_file_info_by_id\nfrom pydatalab.logger import LOGGER\n\nfrom .utils import read_bruker_1d\n\n\nclass NMRBlock(DataBlock):\n    blocktype = \"nmr\"\n    description = \"Simple NMR Block\"\n    accepted_file_extensions = \".zip\"\n    defaults = {\"process number\": 1}\n    _supports_collections = False\n\n    @property\n    def plot_functions(self):\n        return (self.generate_nmr_plot,)\n\n    def read_bruker_nmr_data(self):\n        if \"file_id\" not in self.data:\n            LOGGER.warning(\"NMRPlot.read_bruker_nmr_data(): No file set in the DataBlock\")\n            return\n\n        zip_file_info = get_file_info_by_id(self.data[\"file_id\"], update_if_live=True)\n        filename = zip_file_info[\"name\"]\n\n        name, ext = os.path.splitext(filename)\n        if ext.lower() not in self.accepted_file_extensions:\n            LOGGER.warning(\n                \"NMRBlock.read_bruker_nmr_data(): Unsupported file extension (must be .zip)\"\n            )\n            return\n\n        # unzip:\n        directory_location = zip_file_info[\"location\"] + \".extracted\"\n        LOGGER.debug(f\"Directory location is: {directory_location}\")\n        with zipfile.ZipFile(zip_file_info[\"location\"], \"r\") as zip_ref:\n            zip_ref.extractall(directory_location)\n\n        extracted_directory_name = os.path.join(directory_location, name)\n        available_processes = os.listdir(os.path.join(extracted_directory_name, \"pdata\"))\n\n        if self.data.get(\"selected_process\") not in available_processes:\n            self.data[\"selected_process\"] = available_processes[0]\n\n        try:\n            df, a_dic, topspin_title, processed_data_shape = read_bruker_1d(\n                os.path.join(directory_location, name),\n                process_number=self.data[\"selected_process\"],\n                verbose=False,\n            )\n        except Exception as error:\n            LOGGER.critical(f\"Unable to parse {name} as Bruker project. 
{error}\")\n return\n\n serialized_df = df.to_dict() if (df is not None) else None\n\n # all data sorted in a fairly raw way\n self.data[\"processed_data\"] = serialized_df\n self.data[\"acquisition_parameters\"] = a_dic[\"acqus\"]\n self.data[\"processing_parameters\"] = a_dic[\"procs\"]\n self.data[\"pulse_program\"] = a_dic[\"pprog\"]\n\n # specific things that we might want to pull out for the UI:\n self.data[\"available_processes\"] = available_processes\n self.data[\"nucleus\"] = a_dic[\"acqus\"][\"NUC1\"]\n self.data[\"carrier_frequency_MHz\"] = a_dic[\"acqus\"][\"SFO1\"]\n self.data[\"carrier_offset_Hz\"] = a_dic[\"acqus\"][\"O1\"]\n self.data[\"recycle_delay\"] = a_dic[\"acqus\"][\"D\"][1]\n self.data[\"nscans\"] = a_dic[\"acqus\"][\"NS\"]\n self.data[\"CNST31\"] = a_dic[\"acqus\"][\"CNST\"][31]\n self.data[\"processed_data_shape\"] = processed_data_shape\n\n self.data[\"probe_name\"] = a_dic[\"acqus\"][\"PROBHD\"]\n self.data[\"pulse_program_name\"] = a_dic[\"acqus\"][\"PULPROG\"]\n self.data[\"topspin_title\"] = topspin_title\n\n def generate_nmr_plot(self):\n self.read_bruker_nmr_data() # currently calls every time plotting happens, but it should only happen if the file was updated\n if \"processed_data\" not in self.data or not self.data[\"processed_data\"]:\n self.data[\"bokeh_plot_data\"] = None\n return\n\n df = pd.DataFrame(self.data[\"processed_data\"])\n df[\"normalized intensity\"] = df.intensity / df.intensity.max()\n\n bokeh_layout = selectable_axes_plot(\n df,\n x_options=[\"ppm\", \"hz\"],\n y_options=[\n \"intensity\",\n \"intensity_per_scan\",\n \"normalized intensity\",\n ],\n plot_line=True,\n point_size=3,\n )\n bokeh_layout.children[0].x_range.flipped = True # flip x axis, per NMR convention\n\n self.data[\"bokeh_plot_data\"] = bokeh.embed.json_item(bokeh_layout, theme=mytheme)\n","repo_name":"elbee99/datalab","sub_path":"pydatalab/pydatalab/apps/nmr/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"277567140","text":"import csv\nimport boto3\nimport requests\n\nTHE_SRC_URL = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'\n\ndef create_dynamodb_table():\n dynamodb_client = boto3.client('dynamodb')\n\n try:\n response = dynamodb_client.create_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'date',\n 'AttributeType': 'S',\n },\n {\n 'AttributeName': 'state_county',\n 'AttributeType': 'S',\n },\n ],\n KeySchema=[\n {\n 'AttributeName': 'state_county',\n 'KeyType': 'HASH',\n },\n {\n 'AttributeName': 'date',\n 'KeyType': 'RANGE',\n },\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1024,\n 'WriteCapacityUnits': 1024,\n },\n TableName='us-counties-covid-19',\n )\n except dynamodb_client.exceptions.ResourceInUseException as e:\n print(e)\n pass\n\ndef get_covid_19_data_as_json():\n url = THE_SRC_URL\n r = requests.get(url, allow_redirects=True)\n rows = [row for row in csv.reader(r.text.splitlines(), delimiter=',')]\n\n col_names = rows[0]\n items = []\n for row in rows[1:]:\n i = 0\n row_dic = {}\n for col_name in col_names:\n row_dic[col_name] = row[i]\n i += 1\n row_dic['state_county'] = row[2] + '_' + row[1]\n items.append(row_dic)\n return items\n\ndef batch_write(items):\n dynamodb = boto3.resource('dynamodb')\n db = dynamodb.Table('us-counties-covid-19')\n\n with db.batch_writer() as batch:\n for item in items:\n batch.put_item(Item=item)\n print(item)\n\nif __name__ == '__main__':\n 
#create_dynamodb_table()\n items = get_covid_19_data_as_json()\n batch_write(items)","repo_name":"agilebeat-inc/maprover--api-visualization","sub_path":"util/load_db.py","file_name":"load_db.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42934578304","text":"import artwork\nimport datetime\nimport time\nimport colorsys\nfrom wakey import WakeyThread\nfrom database import Colours\n\n# import bridge from connect script\nartwork.connect()\nfrom connect import b, ip\n\nthreads = []\n\n# things that can be listed -- lights (and status), groups/rooms\n\n# turn lights on and off -- e.g. turn OwenBedroom off / turn OwenBedroom on\n\n# first argument will be the command type\n\n# could create the commands with objects, but functions would be fine\n\n\n#################\n# #\n# UTILITIES #\n# #\n#################\n\n# get a group from group_name\ndef get_group(group_name):\n for g in b.groups:\n if g.name == group_name:\n return g\n\n\n# command needs to be vetted to allow spaces when quotation marks are used\n# this only allows one space max between words\ndef split(command):\n command = command.split()\n vetted_commands = []\n # variable to store combination\n comb = \"\"\n add_to_comb = False\n for i in command:\n if i[0] == '\"':\n comb += i[1:]\n add_to_comb = True\n elif i[-1] == '\"':\n comb += \" \" + i[:-1]\n vetted_commands.append(comb)\n comb = \"\"\n add_to_comb = False\n else:\n if add_to_comb == True:\n comb += \" \" + i\n else:\n vetted_commands.append(i)\n return vetted_commands\n\n\n# converts rgb to hsv and sets the light to the colour\ndef set_light_to_colour(light_name, rgb):\n h, s, v = colorsys.rgb_to_hsv(rgb[0], rgb[1], rgb[2])\n\n b.set_light(light_name, \"hue\", int(round(h * 65535)))\n b.set_light(light_name, \"sat\", int(round(s * 255)))\n b.set_light(light_name, \"bri\", int(round(v)))\n\n\n# gets ans to yes or no question\ndef get_ans():\n ans = \"\"\n while ans != \"y\" and ans != \"n\":\n ans = str.lower(input(\"y/n > \"))\n return ans\n\n\n# clears all finished threads\ndef clear_finished_threads():\n for t in threads:\n if not t.isAlive():\n threads.remove(t)\n\n\ndef kill_all_threads():\n for t in threads:\n t.stop()\n\n\n##################\n# #\n# COMMANDS #\n# #\n##################\n\n# ls (list function)\n# takes either lights or groups/rooms\ndef ls(args):\n if len(args) != 1:\n print(\"ERROR: 'ls' takes exactly 1 argument - [object]\")\n return\n\n # outputs the list of all specified object type\n object = str.lower(args[0])\n rstring = \"\"\n if object == \"lights\":\n print(\"\\nLIGHTS:\\n\")\n for l in b.lights:\n print(f\"NAME: {l.name}\\tON: {l.on}\")\n print()\n elif object == \"groups\" or object == \"rooms\":\n print(\"\\nGROUPS\\n\")\n for g in b.groups:\n print(f\"NAME: {g.name}\")\n print(\"LIGHTS: \")\n for l in g.lights:\n print(f\"\\tNAME: {l.name}\\tON: {l.on}\")\n print()\n elif object == \"colours\":\n print(\"\\nCOLOURS:\\n\")\n db = Colours()\n colours = db.get_all_names()\n for c in colours:\n print(c)\n print()\n elif object == \"wakemeup\":\n print(\"\\nWAKEMEUP:\\n\")\n for t in threads:\n print(f\"{t.light_name}@{t.wake_time}\")\n print()\n else:\n print(f\"'{object}' is not a valid object to list.\")\n\n\n# turns a light on or off\ndef turn(args):\n if len(args) != 2:\n print(\"ERROR: 'turn' takes exactly 2 args - [light_name] and [on/off]\")\n return\n\n light_name = args[0]\n status = str.lower(args[1])\n\n if status != \"on\" 
and status != \"off\":\n print(\"ERROR: Status is not valid.\")\n return\n\n if status == \"on\":\n is_on = True\n else:\n is_on = False\n\n print(f\"\\nTurning light '{light_name}' {status}...\\n\")\n b.set_light(str(light_name), \"on\", is_on)\n\n\n# turns all lights in one group/room on or off\ndef turnall(args):\n\n if len(args) != 2:\n print(\"ERROR: 'turnall' takes exactly 2 args - [group_name] and [on/off]\")\n return\n\n group_name = args[0]\n status = str.lower(args[1])\n\n if status != \"on\" and status != \"off\":\n print(\"ERROR: Status is not valid.\")\n return\n\n if status == \"on\":\n is_on = True\n else:\n is_on = False\n\n g = get_group(group_name)\n try:\n g.on = is_on\n print(f\"\\nTurning all lights in '{group_name}' {status}\\n\")\n except Exception:\n print(\"ERROR: this group does not exist.\")\n\n\n# change brightness of lights given name and brightness\ndef brightness(args):\n if len(args) != 2:\n print(\n \"ERROR: 'brightness' takes exactly 2 args - [light_name] and [brightness(0-255)]\"\n )\n return\n\n try:\n int(args[1])\n except Exception:\n print(\"ERROR: a number was not entered for the brightness.\")\n return\n\n light_name = args[0]\n brightness = int(args[1])\n\n print(f\"\\nSetting {light_name} brightness to {brightness}\\n\")\n b.set_light(light_name, \"bri\", brightness)\n\n\n# change brightness of all lights in a group/room\ndef brightnessall(args):\n if len(args) != 2:\n print(\n \"ERROR: 'brightnessall' takes exactly 2 args - [light_name] and [brightness(0-255)]\"\n )\n return\n\n try:\n int(args[1])\n except Exception:\n print(\"ERROR: a number was not entered for the brightness.\")\n return\n\n group_name = args[0]\n brightness = int(args[1])\n\n g = get_group(group_name)\n\n try:\n print(f\"\\nSetting {group_name} brightness to {brightness}\\n\")\n g.brightness = brightness\n except Exception:\n print(\"ERROR: this group name does not exist.\")\n return\n\n\n# turns specified light on at a specified time\ndef wakemeup(args):\n if len(args) != 3:\n print(\n \"ERROR: 'wakemeup takes exactly 3 arguments - [light_name] [hour] [minute]\"\n )\n return\n\n try:\n for i in args[1:]:\n int(i)\n except Exception:\n print(\"ERROR: at least one of the arguments is not an integer.\")\n return\n\n light_name = args[0]\n hours = int(args[1])\n mins = int(args[2])\n\n conds = [hours > 24, hours < 0, mins > 60, mins < 0]\n\n if any(conds):\n print(\"ERROR: hours or minutes are out of range.\")\n return\n # set datetime objects for now and wake time\n now = datetime.datetime.now()\n wake_time = datetime.datetime(now.year, now.month, now.day, hours, mins)\n\n # check whether the next time is today or tomorrow\n if wake_time < now:\n day = \"tomorrow\"\n wake_time += datetime.timedelta(days=1)\n else:\n day = \"today\"\n\n print(f\"Waiting to wake you up at {wake_time.hour}:{wake_time.minute} {day}...\")\n\n # starts thread so command line can still be used while waiting (wakey.py)\n t = WakeyThread(b, light_name, wake_time)\n threads.append(t)\n t.start()\n\n\ndef dontwakemeup(args):\n if len(args) != 0:\n print(\"ERROR: dontwakemeup takes no arguments.\")\n return\n kill_all_threads()\n\n\n# cycle through loads of different colours on all lights\ndef disco(args):\n if len(args) != 0:\n print(\"ERROR: disco takes no arguments\")\n return\n\n colours = [\n [255, 0, 0],\n [255, 255, 0],\n [0, 255, 0],\n [0, 255, 255],\n [0, 0, 255],\n [255, 0, 255],\n ]\n i = 0\n lights = []\n for l in b.lights:\n try:\n x = l.colormode\n if l.on:\n lights.append(l)\n else:\n 
print(f\"ERROR: {l.name} is turned off.\")\n except Exception:\n print(f\"ERROR: {l.name} does not have colour capabilities.\")\n if len(lights) == 0:\n print(\"ERROR: none of your colour-enabled lights are on.\")\n return\n print(\"PARTAY!!! (press ctrl+c to stop the disco)\")\n try:\n while True:\n try:\n for l in lights:\n set_light_to_colour(l.name, colours[i])\n i += 1\n if i == len(colours) - 1:\n i = 0\n time.sleep(0.5)\n except KeyboardInterrupt:\n return\n except KeyboardInterrupt:\n return\n\n\n# sets light colour to specific rgb colour\ndef setcol(args):\n if len(args) != 4:\n print(\n \"ERROR: setcol takes exactly 4 arguments - [light_name] [red (0-255)] [green (0-255)] [blue (0-255)]\"\n )\n return\n rgb = [args[1], args[2], args[3]]\n try:\n for i in range(0, len(rgb)):\n rgb[i] = int(rgb[i])\n except Exception:\n print(\"ERROR: one of the colour arguments was not an integer.\")\n return\n for i in rgb:\n if i > 255 or i < 0:\n print(\"ERROR: one of the colour values was out of range 0-255.\")\n return\n set_light_to_colour(args[0], rgb)\n\n\n# sets light to one of the saved colours\ndef col(args):\n if len(args) != 2:\n print(\"ERROR: col takes exactly 2 arguments - [light_name] [colour_name]\")\n return\n db = Colours()\n colour = db.get_colour(args[1])\n\n if colour is not None:\n rgb = colour[1:]\n set_light_to_colour(args[0], rgb)\n else:\n print(f\"ERROR: '{args[1]}' is not a defined colour.\")\n return\n\n\n# adds a new rgb colour to the saved colours\ndef newcol(args):\n if len(args) != 4:\n print(\n \"ERROR: newcol takes exactly 4 arguments - [light_name] [red (0-255)] [green (0-255)] [blue (0-255)]\"\n )\n return\n rgb = [args[1], args[2], args[3]]\n try:\n for i in range(0, len(rgb)):\n rgb[i] = int(rgb[i])\n except Exception:\n print(\"ERROR: one of the colour arguments was not an integer.\")\n return\n for i in rgb:\n if i > 255 or i < 0:\n print(\"ERROR: one of the colour values was out of range 0-255.\")\n return\n\n db = Colours()\n if db.get_colour(args[0]) is None:\n db.create_colour((args[0], rgb[0], rgb[1], rgb[2]))\n else:\n print(\n f\"ERROR: this colour already exists, please delete the preset with this name with 'delcol {args[0]}'.\"\n )\n\n\n# deletes a colour based on the name of the light\ndef delcol(args):\n if len(args) != 1:\n print(\"ERROR: delcol takes exactly 1 argument - [light_name]\")\n return\n\n db = Colours()\n colour = db.get_colour(args[0])\n\n if colour is not None:\n print(f\"Are you sure you want to delete the colour '{args[0]}'?\")\n ans = get_ans()\n if ans == \"y\":\n db.delete_colour(args[0])\n print(f\"'{args[0]}' has been deleted.'\")\n else:\n print(f\"ERROR: '{args[0]}' is not a defined colour.\")\n return\n\n\n# deletes all saved colours\ndef delallcol(args):\n if len(args) != 0:\n print(\"ERROR: delallcol takes no arguments\")\n return\n\n db = Colours()\n print(\"Are you sure you want to delete all saved colours?\")\n ans = get_ans()\n if ans == \"y\":\n db.delete_all_colours()\n print(\"all saved colours have been deleted.\")\n\n\ndef help():\n file = open(\"help.txt\", \"r\")\n help_message = file.read()\n file.close()\n print(help_message)\n\n\n# command handler - takes list with each element being word from command\ndef execute(command):\n method = str.lower(command[0])\n args = command[1:]\n if method == \"turn\":\n turn(args)\n elif method == \"ls\":\n ls(args)\n elif method == \"turnall\":\n turnall(args)\n elif method == \"brightness\":\n brightness(args)\n elif method == \"brightnessall\":\n brightnessall(args)\n 
elif method == \"wakemeup\":\n wakemeup(args)\n elif method == \"dontwakemeup\":\n dontwakemeup(args)\n elif method == \"setcol\":\n setcol(args)\n elif method == \"newcol\":\n newcol(args)\n elif method == \"col\":\n col(args)\n elif method == \"delcol\":\n delcol(args)\n elif method == \"delallcol\":\n delallcol(args)\n elif method == \"disco\":\n disco(args)\n elif method == \"help\":\n help()\n else:\n print(\"ERROR: invalid command, type help for all valid commands.\")\n\n\n# starts the cli loop\ndef start_CLI():\n artwork.hcontroller()\n while True:\n try:\n command = input(\"bridge@\" + ip + \":# \")\n clear_finished_threads()\n if str.lower(command) == \"exit\":\n kill_all_threads()\n return\n elif command.strip() == \"\":\n print(\"ERROR: invalid command, type help for all valid commands.\")\n else:\n execute(split(command))\n except KeyboardInterrupt:\n kill_all_threads()\n return\n\n\nif __name__ == \"__main__\":\n if b == False:\n print(\"ERROR: could not connect to a bridge.\")\n else:\n start_CLI()\n\n\n\"\"\"\nTODO:\nchange names of lights and groups\n\ndisco mode\n\nchange colour of groups\n\"\"\"\n","repo_name":"MiniEggz/phue_lights","sub_path":"hcontroller.py","file_name":"hcontroller.py","file_ext":"py","file_size_in_byte":12863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17765943178","text":"from datetime import datetime\nimport matplotlib.pyplot as plt\nimport datos, sqlite3, Interfaz\nimport tkinter as tk\nimport tkinter.messagebox\n\ndate = str(datetime.now())\n\naño = int(date[:4])\nmes = int(date[5:7])\n\ndef muestra (tipo_selec, año_selec):\n \"\"\"Muestra estadísticas de la pieza seleccionada en el año seleccionado.\"\"\"\n grafx = [1,2,3,4,5,6,7,8,9,10,11,12]\n grafy = []\n\n if tipo_selec == 1:\n tipo = \"RNM68\"\n elif tipo_selec == 2:\n tipo = \"RNH68\"\n elif tipo_selec == 3:\n tipo = \"RNH30\"\n elif tipo_selec == 4:\n tipo = \"RNH25\"\n elif tipo_selec == 5:\n tipo = \"PInd800\"\n elif tipo_selec == 6:\n tipo = \"LNM6\"\n elif tipo_selec == 7:\n tipo = \"LNH25\"\n elif tipo_selec == 8:\n tipo = \"LNH30\"\n else:\n tk.messagebox.showerror(\"Error\", \"Seleccione una pieza.\")\n\n if año_selec == 1:\n periodo = 2022\n elif año_selec == 2:\n periodo = 2023\n else:\n tk.messagebox.showinfo(\"Info\", \"Ningún año seleccionado. Se mostrará el año actual.\")\n periodo = año\n \n con = sqlite3.connect(\".\\datos\\{}_{}\".format(tipo, periodo))\n cur = con.cursor()\n cur.execute(\"\"\"SELECT Cantidad FROM {}_{}\"\"\".format(tipo, periodo))\n cantidad = cur.fetchall()\n for x in range (12):\n grafy.append(cantidad[x][0])\n con.close()\n\n plt.bar(grafx, grafy)\n plt.xlabel(\"Mes\")\n plt.ylabel(\"Cantidad\")\n plt.title(\"Producción {} {}\".format(tipo, periodo))\n plt.show()\n\n\n","repo_name":"DBallesteros96/Control-de-produccion","sub_path":"funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10583890660","text":"import os\r\nimport sys\r\nimport time\r\n\r\ntry:\r\n\tuser_input = raw_input(\"\\n1. Capture Data \\n2. Train Data \\n3. 
Start ParkingSystem \\nPilih Menu:\")\r\n\tmenu = user_input\r\n\r\n\tif menu == '1':\r\n\t\texecfile('CaptureFace.py')\r\n\telif menu == '2':\r\n\t\texecfile('TrainData.py')\r\n\telif menu == '3':\r\n\t\texecfile('parking.py')\r\n \r\nexcept KeyboardInterrupt:\r\n\tGPIO.cleanup()\r\n\tsys.exit()\r\n \r\n","repo_name":"mifyusuf/FaceRecParkingSystem","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29557079184","text":"import sys\nfrom PySide2.QtGui import QIcon\nfrom PySide2.QtWidgets import QApplication, QMainWindow, QTableWidgetItem, QFileDialog, QVBoxLayout, QStyledItemDelegate\nfrom ui_main import Ui_MainWindow\nfrom pycomm3 import LogixDriver\nfrom PySide2.QtCore import QTimer, QThreadPool, QDateTime\nfrom PySide2.QtCharts import QtCharts\nfrom utils import resource_path\nfrom datetime import datetime, timedelta\nimport time\nimport csv\nimport json\n\nfrom workers import Worker\n\nfrom pyqtgraph import PlotWidget, plot, DateAxisItem\nimport pyqtgraph as pg\nfrom random import randint\n\ntry:\n # Include in try/except block if you're also targeting Mac/Linux\n from PySide2.QtWinExtras import QtWin\n myappid = 'wood.portapoll.1.0'\n QtWin.setCurrentProcessExplicitAppUserModelID(myappid) \nexcept ImportError:\n pass\nclass TimeAxisItem(pg.AxisItem):\n def tickStrings(self, values, scale, spacing):\n return [datetime.fromtimestamp(value) for value in values]\n\nclass MainWindow(QMainWindow):\n def __init__(self, app):\n super(MainWindow, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.setWindowTitle(\"PortaPoll\")\n \n self.run_poll = True\n app.aboutToQuit.connect(self.thread_stop)\n\n self.ui.pushButton_poll.setStyleSheet(\"\"\"\n QPushButton {\n background-color: red;\n color:white; \n } \n QPushButton:checked{\n background-color: rgb(35, 199, 35);\n border: none; \n }\n QPushButton:hover{ \n background-color: grey; \n border-style: outset; \n } \n \"\"\")\n\n with open(resource_path(\"config/settings.json\")) as f:\n self.settings = json.load(f)\n\n self.ui.tableWidget.setColumnCount(len(self.settings[\"tags\"]) + 1)\n self.ui.tableWidget.setHorizontalHeaderItem(0, QTableWidgetItem(\"Date\"))\n for i in range(0, len(self.settings[\"tags\"])):\n self.ui.tableWidget.setHorizontalHeaderItem(i+1, QTableWidgetItem(self.settings[\"tags\"][i]))\n self.ui.comboBox_chart_tag.addItem(self.settings[\"tags\"][i])\n \n self.ui.lineEdit_ip.setText(self.settings['default_ip'])\n self.ui.lineEdit_ip.editingFinished.connect(self.ip_change)\n self.ui.label_log_file.setText(self.settings[\"log_file\"])\n self.ui.pushButton_log_file.clicked.connect(self.log_file)\n\n self.threadpool = QThreadPool().globalInstance()\n print(\"Multithreading with maximum %d threads\" % self.threadpool.maxThreadCount())\n\n self.poller_thread()\n\n self.chart_table()\n\n def chart_table(self):\n #base = datetime.today()\n #self.x = [base - timedelta(seconds=x) for x in range(20)] # 100 time points\n self.x=[]\n self.y=[]\n #self.y = [0 for _ in range(20)] # 100 data points\n\n date_axis = TimeAxisItem(orientation='bottom')\n self.graph = pg.PlotWidget(axisItems = {'bottom': date_axis})\n #self.graph.plot(x=[x.timestamp() for x in self.x], y=self.y, clear=True)\n\n layout = QVBoxLayout(self)\n layout.addWidget(self.graph)\n self.ui.frame_chart.setLayout(layout)\n \n def refresh_chart(self):\n pen = pg.mkPen(color=(255, 0, 0))\n 
self.graph.plot(x=[x.timestamp() for x in self.x], y=self.y, pen=pen, clear=True)\n\n def log_file(self):\n file_name = QFileDialog.getSaveFileName(self, \"Save\", \"C:/Test Trailer Log.csv\", \"CSV (Comma delimited) (*.csv)\")\n if file_name:\n self.ui.label_log_file.setText(file_name[0])\n self.settings[\"log_file\"] = file_name[0]\n with open(resource_path(\"config/settings.json\"), 'w') as f:\n json.dump(self.settings, f, indent=4)\n\n def ip_change(self):\n self.settings['default_ip'] = self.ui.lineEdit_ip.text()\n with open(resource_path(\"config/settings.json\"), 'w') as f:\n json.dump(self.settings, f, indent=4)\n \n def thread_stop(self):\n self.run_poll = False\n\n def poller_thread(self):\n worker = Worker(self.poll_plc)\n self.threadpool.start(worker)\n\n def poll_plc(self):\n while self.run_poll:\n while self.ui.pushButton_poll.isChecked():\n if int(self.ui.label_countdown.text()) > self.ui.spinBox_poll.value():\n self.ui.label_countdown.setText(str(self.ui.spinBox_poll.value()))\n if int(self.ui.label_countdown.text()) < 1:\n self.ui.label_countdown.setText(str(self.ui.spinBox_poll.value()))\n try:\n \n with LogixDriver(self.ui.lineEdit_ip.text()) as plc:\n curr_datetime = datetime.now()\n date_stamp = curr_datetime.strftime(\"%Y-%m-%d %H:%M:%S\")\n \n \n row = self.ui.tableWidget.rowCount()\n if row >= self.ui.spinBox_chart_points.value():\n self.ui.tableWidget.removeRow(row-1)\n self.ui.tableWidget.insertRow(0)\n self.ui.tableWidget.setItem(0, 0, QTableWidgetItem(date_stamp))\n\n with open(self.ui.label_log_file.text(), \"a\", newline=\"\") as outfile:\n writer = csv.writer(outfile)\n for i in self.settings[\"tags\"]:\n fields = []\n fields.append(date_stamp)\n val = round(plc.read(i).value, 2)\n fields.append(i)\n fields.append(val)\n writer.writerow(fields)\n\n if i == self.ui.comboBox_chart_tag.currentText():\n self.x.append(curr_datetime)\n self.y.append(val)\n if len(self.x) > self.ui.spinBox_chart_points.value():\n self.x.pop(0)\n if len(self.y) > self.ui.spinBox_chart_points.value():\n self.y.pop(0)\n self.refresh_chart()\n\n self.ui.tableWidget.setItem(0, self.settings[\"tags\"].index(i)+1, QTableWidgetItem(\"%.2f\" % val))\n except Exception as error:\n self.ui.label_error.setStyleSheet(\"background-color: rgba(255, 0, 0, 0.5);\")\n self.ui.label_error.setText(str(error))\n time.sleep(10)\n else:\n self.ui.label_error.setStyleSheet(\"background-color: rgba(35, 199, 35, 1);\")\n self.ui.label_error.setText(\" \")\n \n time.sleep(1)\n self.ui.label_countdown.setText(str(int(self.ui.label_countdown.text()) - 1))\n\nif __name__ == \"__main__\":\n app = QApplication([])\n app.setWindowIcon(QIcon('images/sensor.ico'))\n window = MainWindow(app)\n window.show()\n sys.exit(app.exec_())","repo_name":"muelleRw/PortaPoll","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6049715256","text":"\n\nclass testClass:\n def __init__(self):\n self.name = None\n self.code = None\n\n def get_name(self, test):\n global name\n self.name = test['name']\n return self.name\n\n def get_code(self, data):\n global code, name\n self.name = data['name']\n self.code = data['code']\n return self.name, self.code\n\n def __str__(self):\n return self.name, self.code\n\n\nif __name__ == '__main__':\n t = testClass()\n print(t.__str__())\n test= {'name': 'test1'}\n print(t.get_name(test))\n 
print(t.__str__())\n","repo_name":"duhq123/testDjango","sub_path":"testStart/a0007/testclass.py","file_name":"testclass.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32250734890","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import TimeSeriesSplit, GridSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom dateutil.relativedelta import relativedelta\n\nfrom typing import Tuple\n\nfrom .loss import make_quad_loss\n\n\none_month = relativedelta(months=1)\ntscv = TimeSeriesSplit(n_splits=15)\n\ngrid_search_over = {\n 'max_depth': [10, 20, 30],\n 'max_features': ['auto', 'sqrt'],\n 'n_estimators': [100, 200, 300]\n}\n\ndef construct_rf_chunks(\n df: pd.DataFrame, \n lags: int, \n exog_df: pd.DataFrame = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n\n if isinstance(df, pd.Series):\n df = df.to_frame()\n\n if isinstance(exog_df, pd.Series):\n exog_df = exog_df.to_frame()\n\n add_exog = exog_df is not None\n\n delta_lags = relativedelta(months=lags) \n\n y = df.iloc[lags:]\n\n N, M = y.shape\n E = exog_df.shape[1] if add_exog else 0\n\n regr_size = lags*(M + E)\n\n X = np.empty((N, regr_size))\n\n for i, date in enumerate(y.index.tolist()):\n\n regressors = np.empty((1, regr_size))\n\n prev_date = date - delta_lags\n lagged_values = df.loc[prev_date:date - one_month]\n\n regressors[0, :M*lags] = lagged_values.to_numpy().reshape(1, -1)[0]\n\n if add_exog:\n lagged_exo = exog_df.loc[prev_date:date - one_month]\n regressors[0, -E*lags:] = lagged_exo.to_numpy().reshape(1, -1)[0]\n\n X[i] = regressors\n\n return X, y\n\n\ndef make_forecaster(\n df: pd.DataFrame,\n exog_df: pd.DataFrame = None,\n lags: int = 12,\n search_params = {},\n **cv_kwargs\n ):\n\n X, y = construct_rf_chunks(df, exog_df=exog_df, lags=lags)\n\n search_params = {**grid_search_over, **search_params}\n\n rf = RandomForestRegressor()\n\n clf = GridSearchCV(\n rf, search_params,\n scoring=make_quad_loss(alpha=0.5, scorer=True), \n **cv_kwargs\n )\n\n clf.fit(X, y)\n\n model = clf.best_estimator_ \n\n def forecaster(X: np.ndarray) -> np.ndarray: \n\n X = X.reshape(1, -1)\n\n y_hat = {}\n\n all_prediction = np.array([pred.predict(X)[0] for pred in model.estimators_])\n \n y_hat[\"lower_bound\"] = np.percentile(all_prediction, 2.5, axis = 0)\n y_hat[\"mean\"] = np.mean(all_prediction, axis = 0)\n y_hat[\"upper_bound\"] = np.percentile(all_prediction, 97.5, axis = 0)\n \n\n return y_hat\n\n return forecaster\n","repo_name":"NoFishLikeIan/tinbergen","sub_path":"homework/app_macro/week-two/forecast/rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"2131345670","text":"\"\"\"\nFilename: voting.py\n\nAuthors:\n Minseung Kim - msgkim@ucdavis.edu\n Jason Youn - jyoun@ucdavis.edu\n\nDescription:\n Resolve inconsistencies using voting algorithm.\n\nTo-do:\n\"\"\"\n# standard imports\nimport logging as log\nimport operator\n\n# third party imports\nimport numpy as np\nimport pandas as pd\n\n\nclass Voting:\n \"\"\"\n Inconsistency resolution using Sums.\n \"\"\"\n @classmethod\n def resolve_inconsistencies(cls, data, inconsistencies, answers=None):\n \"\"\"\n Resolve any inconsistency using voting algorithm.\n\n Inputs:\n data: integrated data that needs inconsistencies resolved\n inconsistencies: Dictionary containing inconsistency_id as key\n and list of inconsistent 
triples + source as value\n\n                {\n                    0: [(('Subject 1', 'Predicate 1', 'Object 1'), ['Source 1']),\n                        (('Subject 1', '!Predicate 1', 'Object 1'), ['Source 2'])],\n                    1: [(('Subject 2', 'Predicate 2', 'Object 2'), ['Source 1']),\n                        (('Subject 2', '!Predicate 2', 'Object 2'), ['Source 2'])],\n                    ...\n                }\n            answers: currently unused\n\n        Returns:\n            inconsistent_tuples_with_max_occurrence: dictionary where the key is the inconsistency_id\n                and the value is a list of tuples where each tuple is of form\n                (inconsistent_tuple, max_occurrence)\n            np_present_trustworthiness_vector: vector containing trustworthiness\n                of all the sources\n        \"\"\"\n        log.info('Resolving inconsistencies using Voting')\n\n        np_present_trustworthiness_vector = np.array(pd.Series(data.groupby('Source').size()))\n        inconsistent_tuples_with_max_occurrence = {}\n\n        for inconsistency_id in inconsistencies:\n            inconsistent_tuples = inconsistencies[inconsistency_id]\n            occurrences = {inconsistent_tuple: len(sources)\n                           for inconsistent_tuple, sources in inconsistent_tuples}\n            inconsistent_tuple, max_occurrence = max(\n                occurrences.items(), key=operator.itemgetter(1))\n            inconsistent_tuples_with_max_occurrence[inconsistency_id] = \\\n                [(inconsistent_tuple, max_occurrence), ('dummy',)]\n\n        return inconsistent_tuples_with_max_occurrence, None, np_present_trustworthiness_vector\n","repo_name":"IBPA/KIDS","sub_path":"kg_constructor/integrate_modules/inconsistency_correctors/voting.py","file_name":"voting.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"}
+{"seq_id":"36416014288","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nTs = 1 # symbol period; the transmitter also sends one symbol every Ts\nL = 8 # oversampling factor\nFs = 1/Ts * L # sampling frequency (multiplied by L because of oversampling)\nFc = 1 # carrier frequency\n\nfor k in range(3):\n    if k == 0:\n        constellation = [-1+0j, 1+0j]\n        constellation_name = 'BPSK'\n    elif k == 1:\n        constellation = [-1-1j, -1+1j, 1-1j, 1+1j]\n        constellation_name = 'QPSK'\n    elif k == 2:\n        constellation = [1 + 1j, 1 + 3j, 3 + 1j, 3 + 3j, -1 + 1j, -1 + 3j, -3 + 1j, -3 + 3j, -1 - 1j, -1 - 3j, -3 - 1j, -3 - 3j, 1 - 1j, 1 - 3j, 3 - 1j, 3 - 3j]\n        constellation_name = '16QAM'\n\n    N_symbol = len(constellation) # number of symbols the transmitter will send\n\n    n_time = [0]*(L*N_symbol) # oversampled time instants\n    for m in range(len(n_time)):\n        n_time[m] = m * Ts/L\n\n    t_time = [0]*(L*N_symbol*50) # dense time axis used to approximate the continuous-time signal\n    for m in range(len(t_time)):\n        t_time[m] = m * Ts/(L*50)\n\n    # first determine the discrete-time sequence of the baseband signal\n    symbol_sequence = constellation[:] # assume the transmitter sends every constellation symbol once; this discrete sequence is symbol_sequence\n\n    s = [0]*(L*N_symbol) # s is the result of oversampling the continuous-time baseband signal\n    for m in range(len(symbol_sequence)):\n        for n in range(L): # L is the oversampling factor (how many times we oversample)\n            s[m*L + n] = symbol_sequence[m]\n\n    s_power = [0]*(L*N_symbol) # squared magnitude of every sample of s, i.e. the energy of each sample\n    for m in range(len(s)):\n        s_power[m] = abs(s[m])**2\n\n    # finally, compute the PAPR of s:\n    # first compute the average power\n    # and find the peak power along the way\n    avg_power = 0\n    peak_power = 0\n    for m in range(len(s_power)):\n        avg_power += s_power[m]\n        if s_power[m] > peak_power:\n            peak_power = s_power[m]\n    avg_power /= len(s_power)\n    PAPR = peak_power / avg_power\n    PAPR_dB = 10*np.log10(PAPR)\n\n    s_real = [0]*len(s) # s_real is the real part of the oversampled signal s\n    s_imag = [0]*len(s) # s_imag is the imaginary part of the oversampled signal s\n    for m in range(len(s)):\n        s_real[m] = s[m].real\n        s_imag[m] = s[m].imag\n\n    plt.figure(constellation_name)\n    plt.subplot(3,2,1)\n    markerline, stemlines, baseline = plt.stem(n_time, s_real, markerfmt=' ')\n    plt.setp(baseline, 'color', 'k') # set the baseline colour to black\n    plt.setp(stemlines, 'color', 'k') # set the stem colour to black\n    plt.title('{0}, {1} symbols, Ts={2}s, Fs={3}Hz, L={4}'.format(constellation_name, N_symbol, Ts, Fs, L))\n    plt.ylabel(r'$\~s_I[n]$')\n    plt.subplot(3,2,3)\n    markerline, stemlines, baseline = plt.stem(n_time, s_imag, markerfmt=' ')\n    plt.setp(baseline, 'color', 'k') # set the baseline colour to black\n    plt.setp(stemlines, 'color', 'k') # set the stem colour to black\n    plt.ylabel(r'$\~s_Q[n]$')\n    plt.subplot(3,2,5)\n    markerline, stemlines, baseline = plt.stem(n_time, s_power, markerfmt=' ')\n    plt.setp(baseline, 'color', 'k') # set the baseline colour to black\n    plt.setp(stemlines, 'color', 'k') # set the stem colour to black\n    plt.title('PAPR={0:.3F}dB'.format(PAPR_dB))\n    plt.xlabel('time(s)\nbaseband signal')\n    plt.ylabel(r'$|\~s_I[n]|^2+|\~s_Q[n]|^2$')\n\n    # next, determine the continuous-time passband signal and its oversampled discrete-time sequence\n    # the \"continuous-time\" signal below is still a discrete sequence in simulation, just densely sampled, so it looks continuous\n    continuous_s = [0]*len(t_time)\n    p = 0\n    for m in range(len(symbol_sequence)):\n        for n in range(len(t_time) // len(symbol_sequence)):\n            continuous_s[p] = ( symbol_sequence[m] * np.exp(1j * 2*np.pi * Fc * t_time[p]) ).real\n            p += 1\n\n    # discrete-time sequence obtained by oversampling the continuous-time signal\n    discrete_s = [0]*len(n_time)\n    p = 0\n    for m in range(len(symbol_sequence)):\n        for n in range(len(n_time) // len(symbol_sequence)):\n            discrete_s[p] = ( symbol_sequence[m] * np.exp(1j * 2*np.pi * Fc * n_time[p]) ).real\n            p += 1\n\n    # square every point of the continuous-time signal and the discrete-time sequence to get the per-sample energy\n    continuous_s_power = [0]*len(t_time)\n    discrete_s_power = [0]*len(n_time)\n    for m in range(len(continuous_s_power)):\n        continuous_s_power[m] = abs(continuous_s[m])**2\n    for m in range(len(discrete_s_power)):\n        discrete_s_power[m] = abs(discrete_s[m])**2\n\n    # finally, use the discrete-time sequence to find the PAPR of the passband signal:\n    # first compute the average power\n    # and find the peak power along the way\n    avg_power = 0\n    peak_power = 0\n    for m in range(len(discrete_s_power)):\n        avg_power += discrete_s_power[m]\n        if discrete_s_power[m] > peak_power:\n            peak_power = discrete_s_power[m]\n    avg_power /= len(discrete_s_power)\n    PAPR = peak_power / avg_power\n    PAPR_dB = 10 * np.log10(PAPR)\n\n    plt.figure(constellation_name)\n    plt.subplot(2, 2, 2)\n    plt.plot(t_time, continuous_s, color='red', linestyle='--')\n    markerline, stemlines, baseline = plt.stem(n_time, discrete_s, markerfmt=' ')\n    plt.setp(baseline, 'color', 'k') # set the baseline colour to black\n    plt.setp(stemlines, 'color', 'k') # set the stem colour to black\n    plt.title('{0}, {1} symbols, Ts={2}s, Fs={3}Hz, L={4}'.format(constellation_name, N_symbol, Ts, Fs, L))\n    plt.ylabel(r'$s[n]$')\n    plt.subplot(2,2,4)\n    plt.plot(t_time, continuous_s_power, color='red', linestyle='--')\n    markerline, stemlines, baseline = plt.stem(n_time, discrete_s_power, markerfmt=' ')\n    plt.setp(baseline, 'color', 'k') # set the baseline colour to black\n    plt.setp(stemlines, 'color', 'k') # set the stem colour to black\n    plt.title('PAPR={0:.3F}dB'.format(PAPR_dB))\n    plt.xlabel('time (s)\npassband signal')\n    plt.ylabel(r'$|s[n]|^2$')\n\nplt.show()\n\n\n\n","repo_name":"XassassinXsaberX/communication-simulation","sub_path":"OFDM/PAPR reduction/PAPR (single carrier).py","file_name":"PAPR (single carrier).py","file_ext":"py","file_size_in_byte":6046,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"}
+{"seq_id":"18708462687","text":"import json as js\nfrom tkinter import *\nfrom tkinter import messagebox, filedialog\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\nfrom helper import base, dirn, get_raw_data, get_data_len, get_datetime, get_endtime, icopath, millisecs, now, secs, strfdelta, write_data\nimport 
matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom datetime import datetime as dt\nimport os\nfrom pprint import pprint\nimport click\n\ndef dicto(_file, _dict):\n print(\"Write into\", _file)\n with open(_file, \"w\") as json_file:\n print(\"Dump\")\n printd(_dict)\n js.dump(_dict, json_file)\n\ndef dicti(_file):\n with open(_file) as json_file:\n return js.load(json_file)\n\ndef printd(_dict):\n for tskey in _dict.keys():\n print(\"File:\", tskey)\n curfile = _dict[tskey]\n for condkey in curfile.keys():\n if type(curfile[condkey]) is dict:\n cond = curfile[condkey]\n print(\"| \")\n print(\"+-- Condition:\", condkey)\n for key in cond.keys():\n if key == \"range\":\n print(\" | \")\n print(\" +-- from\", cond[key][0], \"to\", cond[key][1])\n\nclass App():\n def __init__(self):\n self.root = Tk()\n ico = icopath() + 'glue.png'\n img = ImageTk.PhotoImage(Image.open(ico))\n self.root.tk.call('wm', 'iconphoto', self.root._w, img)\n self.root.title('flyPAD Conditioner')\n self.root.configure(bg='white')\n self.root.resizable(0,0)\n self.nextbutton = -1\n\n style = ttk.Style()\n style.configure('.', font=('Helvetica', 12))\n\n topframe = Frame(self.root)\n topframe.pack()\n self.lb = Listbox(topframe)\n lbxw = 5\n self.lb.pack(side=LEFT)\n\n scrollbar = Scrollbar(topframe)\n scrollbar.pack(side=LEFT)\n\n # attach listbox to scrollbar\n self.lb.config(width=50, yscrollcommand=scrollbar.set)\n scrollbar.config(command=self.lb.yview)\n\n bottomframe = Frame(self.root)\n bottomframe.pack(side=BOTTOM)\n # add button\n add = self.add_button(bottomframe, \"add.png\", self.add)\n # remove button\n remove = self.add_button(bottomframe, \"remove.png\", self.remove)\n # glue button\n glue = self.add_button(bottomframe, \"edit.png\", self.edit)\n\n ### data structure\n self.data = {}\n self.buffer = [] # list of buffer times in secs\n\n def add(self):\n files = filedialog.askopenfilenames(title='Choose file/s to load')\n for _file in files:\n dtime = get_datetime(_file)\n self.data[dtime] = {}\n self.data[dtime][\"filename\"] = _file\n self.data[dtime][\"range\"] = [1 , 16] # arena indices\n\n self.lb.delete(0, END)\n sortkeys = sorted(self.data.keys())\n for key in sortkeys:\n self.lb.insert(END, key)\n self.update()\n\n\n def add_button(self, _root, _name, _command):\n button = Button(_root,justify = LEFT, highlightthickness=0,bd=0, bg='white', command=_command)\n im = self.add_image(_name)\n button.config(image=im, width=40, height=40)\n button.image = im\n button.pack(side=LEFT)\n\n def add_image(self, _name):\n image = Image.open(icopath()+_name)\n image = image.resize((36, 36), Image.ANTIALIAS) #The (250, 250) is (height, width)\n return ImageTk.PhotoImage(image)\n\n def clear_fig(self):\n self.ax.cla()\n self.ax.axis('off')\n self.canvas.draw()\n\n def edit(self, nch=64, fs=100):\n sortkeys = sorted(self.data.keys())\n if len(sortkeys):\n files = [self.data[sortkey][\"filename\"] for sortkey in sortkeys]\n fulldata = get_raw_data(files[0])\n lastvalue = fulldata[-64:]\n for ind, _file in enumerate(files[1:]):\n data = get_raw_data(_file)\n bufferdata = np.tile(lastvalue, int(fs*self.buffer[ind]))\n fulldata = np.concatenate((fulldata, bufferdata, data))\n\n ### CHECK LENGTHS\n print(self.totallen, \" == \", fulldata.shape[0]/nch, \"?\")\n # saving file\n asksave = messagebox.askquestion(\"Saving glued data\", \"Do you want to save glued data into file?\", 
icon='warning')\n if asksave == 'yes':\n savefile = filedialog.asksaveasfilename(title=\"Save datafile as...\", defaultextension=\"\", initialdir=dirn(files[0]), initialfile=\"GLUED\"+base(files[0]))\n write_data(savefile, fulldata)\n\n def next_button(self):\n self.nextbutton += 1\n return self.nextbutton\n\n def refresh_fig(self,x,y,col,resize=True):\n self.ax.plot(x,y,col, linewidth=4)\n ax = self.canvas.figure.axes[0]\n if resize:\n ax.set_xlim(0, x.max())\n ax.set_ylim(0.95, 1.05)\n self.canvas.draw()\n\n def remove(self):\n cursel = self.lb.curselection()[0]\n remkey = dt.strptime(self.lb.get(cursel), \"%Y-%m-%d %H:%M:%S\")\n self.data.pop(remkey, None)\n self.lb.delete(ANCHOR)\n self.update()\n\n def update(self):\n sortkeys = sorted(self.data.keys())\n if len(sortkeys) > 0:\n endtime = get_endtime(sortkeys[-1], self.data[sortkeys[-1]][\"length\"])\n self.totallen = endtime - sortkeys[0]\n self.refresh_fig(np.array([0, secs(self.totallen)]),np.array([1,1]), 'k-')\n if len(sortkeys) > 1:\n self.buffer = []\n for ind in range(len(sortkeys)-1):\n #print(ind)\n start = get_endtime(sortkeys[ind], self.data[sortkeys[ind]][\"length\"]) - sortkeys[0]\n end = sortkeys[ind+1] - sortkeys[0]\n delta = secs(end) - secs(start)\n if delta < 0:\n this_file = self.data[sortkeys[ind]][\"filename\"]\n that_file = self.data[sortkeys[ind+1]][\"filename\"]\n messagebox.showwarning(\"Warning: Overlap detected\", base(this_file)+ \" and \"+base(that_file)+\" seem to overlap. Glued data will be invalid.\")\n self.buffer.append(0)\n else:\n self.buffer.append(secs(end) - secs(start))\n self.refresh_fig(np.array([ secs(start) , secs(end)]),np.array([1,1]), 'r-', resize=False)\n else:\n self.clear_fig()\n\ndef main(argv):\n app = App()\n app.root.mainloop()\n\nif __name__ == \"__main__\":\n #startdt = now()\n #main(sys.argv[1:])\n #print(\"Done. Runtime:\", strfdelta(now() - startdt, \"%H:%M:%S\"))\n print(\"Unit test - fp_conditions.py\")\n conditions = {}\n NA = 32 #number of arenas\n Tk().withdraw()\n deflist = []\n askload = messagebox.askquestion(\"Open defaults file\", \"Do you want to load defaults from file?\", icon='warning')\n deffile = \"E:/Dennis/Google Drive/PhD Project/Data/Jan-Feb/fulllist.txt\"\n if askload == 'yes':\n deffile = filedialog.askopenfilename(title='Choose defaults to load')\n print(deffile)\n with open(deffile) as f:\n for line in f:\n symb = \"'\"\n st = line.find(symb)+1\n en = line.rfind(symb)-4 ## removes 8dD\n deflist.append(line[st:en])\n\n files = filedialog.askopenfilenames(title='Choose file/s to load')\n counter = 0\n for _file in files:\n dtime = get_datetime(_file)\n strtime = dtime.strftime(\"%y-%m-%dT%H:%M:%S\")\n conditions[strtime] = {}\n dictf = conditions[strtime]\n dictf[\"filename\"] = _file\n done = False\n conds = []\n print(\"Enter conditions for \"+strtime+\":\")\n while done == False:\n if len(conds) > 0:\n print(\"Conditions:\", conds)\n click.echo('Next line: ' + deflist[counter] + ' Correct? 
[y/n/d=done]', nl=True)\n c = click.getchar().decode(\"utf-8\")\n print(c)\n if c == \"y\" or c == \"b'y'\":\n conds.append(deflist[counter])\n counter += 1\n elif c == \"n\" or c == \"b'n'\":\n counter += 1\n if counter == len(deflist):\n counter = 0\n elif c == \"d\" or c == \"b'd'\":\n done = True\n elif c == \"b\" or c == \"b'd'\":\n counter -= 1\n elif c == \"r\" or c == \"b'd'\":\n conds.pop()\n elif c == \"c\" or c == \"b'd'\":\n conds.append(deflist[0])\n else:\n inp = input(\"custom lines:\\n\")\n conds = [int(s) for s in inp.split() if s.isdigit()]\n done = False\n for ind, cond in enumerate(conds):\n cond = \"JRC_SS0\" + cond\n dictf[cond] = {}\n dictc = dictf[cond]\n st = ind*int(NA/len(conds))+1\n en = (ind+1)*int(NA/len(conds))\n dictc[\"range\"] = [st , en] # arena indices\n savefile = filedialog.asksaveasfilename(title=\"Save datafile as...\", defaultextension=\".json\", initialdir=dirn(files[0]))\n dicto(savefile, conditions)\n","repo_name":"degoldschmidt/ribeirolab-codeconversion","sub_path":"python/flyPAD/fp_conditions.py","file_name":"fp_conditions.py","file_ext":"py","file_size_in_byte":9276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23131054031","text":"import sys\r\nimport movement as mo\r\nimport inventory as i\r\nimport weapons as w\r\nimport your_character as yc\r\n\r\n\r\n# menus are important. Let's add them.\r\nmenus ={\r\n \"main\": [\"walk\", \"your character\", \"inventory\"],\r\n \"walk\": [\"north\", \"south\", \"east\", \"west\"],\r\n \"yes_no\": [\"yes\", \"no\"],\r\n \"your character\": [\"info\", \"set name\", \"set hero name\"],\r\n \"inventory\": [],\r\n} \r\n\r\n\r\n# Making said menus functional.\r\n# Function to display menus. \r\ndef display_menu(name):\r\n \"\"\"Displays chosen menu to user.\"\"\"\r\n menuvalues = menus[name]\r\n if name == \"main\":\r\n print(\"What would you like to do? \")\r\n elif name == \"walk\":\r\n print(\"Which direction would you like to go?\")\r\n elif name == \"your character\":\r\n print(\"Choose an action to take relating to your character.\")\r\n elif name == \"yes_no\":\r\n print(\"Which option would you like to select?\")\r\n [print(action.title()) for action in menuvalues]\r\n\r\n\r\n# User input function with error handling.\r\ndef user_action(section):\r\n \"\"\"Gets user input and checks for bogus answers.\"\"\"\r\n menuvalues = menus[section]\r\n correction = \"none\"\r\n action_inp = input(\"Enter any of these, or \\\"quit\\\" to exit: \")\r\n if action_inp in menuvalues or action_inp == \"quit\":\r\n if action_inp == \"quit\":\r\n print(\"Ok, farewell, future X-Man.\")\r\n sys.exit()\r\n else:\r\n return action_inp\r\n else:\r\n correct = \"no\"\r\n attempts = 1\r\n while correct != \"yes\":\r\n if attempts == 1:\r\n print(\"That is not a valid option. Please try again\")\r\n elif attempts > 1 and attempts <= 3:\r\n print(\"Sorry, this option is also incorrect. Please try again.\")\r\n else:\r\n print(\"Ok, you must be joking. Please enter a VALID option.\")\r\n display_menu(section)\r\n attempts += 1\r\n action_inp = input(\"Enter any of these, or \\\"quit\\\" to exit: \")\r\n if action_inp in menuvalues or action_inp == \"quit\":\r\n correct = 'yes'\r\n if action_inp == \"quit\":\r\n print(\"Ok, farewell, future X-Man.\")\r\n sys.exit()\r\n else:\r\n return action_inp\r\n \r\n \r\n# Function which sends user back to main menu.\r\ndef main_menu():\r\n \"\"\"For within other functions. 
Goes back to main menu.\"\"\"\r\n    display_menu(\"main\")\r\n    m = user_action(\"main\")\r\n    return m\r\n\r\n\r\n# menu interactions\r\ndef menu_actions(menu):\r\n    \"\"\"Enables menu interaction.\"\"\"\r\n    if menu == \"walk\":\r\n        mo.move(mo.row, mo.col)\r\n        mo.location(mo.row, mo.col)\r\n    elif menu == \"inventory\":\r\n        i.read_inventory()\r\n    elif menu == \"your character\":\r\n        yc.char_acts()","repo_name":"kaitlynnbeston1/Modules","sub_path":"main_funcs.py","file_name":"main_funcs.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"26242334928","text":"import numpy as np\nimport pandas as pd  # used by RandomForest.make_tree below (pd.DataFrame)\nimport scipy.stats as sstats  # used by RandomForest.forest_error below (sstats.mode)\nfrom sklearn.utils import resample\nimport graphviz\n\ndef entropy(counts):\n    counts = counts/sum(counts)\n    return -np.sum(counts * np.log2(counts + 1e-100))\n\ndef gini(counts):\n    counts = counts/sum(counts)\n    return 1 - np.sum(counts * counts)\n\ndef mean_err_rate(counts):\n    counts = counts/sum(counts)\n    return 1 - max(counts)\n\nclass AbstractSplit:\n    \"\"\"Split the examples in a tree node according to a criterion.\n    \"\"\"\n    def __init__(self, attr):\n        self.attr = attr\n\n    def __call__(self, x):\n        \"\"\"Return the subtree corresponding to x.\"\"\"\n        raise NotImplementedError\n\n    def build_subtrees(self, df, subtree_kwargs):\n        \"\"\"Recursively build the subtrees.\"\"\"\n        raise NotImplementedError\n\n    def iter_subtrees(self):\n        \"\"\"Return an iterator over subtrees.\"\"\"\n        raise NotImplementedError\n\n    def add_to_graphviz(self, dot):\n        \"\"\"Add the split to the graphviz visualization.\"\"\"\n        raise NotImplementedError\n\n    def __str__(self):\n        return f\"{self.__class__.__name__}: {self.attr}\"\n\nclass CategoricalMultivalueSplit(AbstractSplit):\n    def build_subtrees(self, df, subtree_kwargs):\n        self.subtrees = {}\n        for group_name, group_df in df.groupby(self.attr):\n            child = Tree(group_df, **subtree_kwargs)\n            self.subtrees[group_name] = child\n\n    def __call__(self, x):\n        # Return the subtree for the given example\n        attr = self.attr\n        if x[attr] in self.subtrees:\n            return self.subtrees[x[attr]]\n        return None\n\n    def iter_subtrees(self):\n        return self.subtrees.values()\n    \n    def add_to_graphviz(self, dot, parent, print_info):\n        for split_name, child in self.subtrees.items():\n            child.add_to_graphviz(dot, print_info)\n            dot.edge(f'{id(parent)}', f'{id(child)}',\n                     label=f'{split_name}')\n\ndef get_categorical_split_and_purity(df, parent_purity, purity_fun, attr,\n                                     normalize_by_split_entropy=False):\n    \"\"\"Return a multivalue split and its purity.\n    Args:\n        df: a dataframe\n        parent_purity: purity of the parent node\n        purity_fun: function to compute the purity\n        attr: attribute over which to split the dataframe\n        normalize_by_split_entropy: if True, divide the purity gain by the split\n            entropy (to compute https://en.wikipedia.org/wiki/Information_gain_ratio)\n    \n    Returns:\n        pair of (split, purity_gain)\n    \"\"\"\n    split = CategoricalMultivalueSplit(attr)\n    # Compute the purity after the split\n    purity = 0\n    for group_name, group_df in df.groupby(attr):\n        # counts of each target value within this group (value_counts sorts in descending order)\n        purity += purity_fun(group_df['target'].value_counts()) * len(group_df)\n    \n    purity /= len(df)\n    purity_gain = parent_purity - purity\n    if normalize_by_split_entropy:\n        purity_gain /= entropy(df[attr].value_counts())\n    return split, purity_gain\n\ndef get_split(df, criterion='infogain', nattrs=None):\n    # Implement termination criteria:\n    # 1. 
Node is pure\n target_value_counts = df['target'].value_counts()\n if len(target_value_counts) == 1:\n return None\n # 2. No split is possible\n # First get a list of attributes that can be split\n possible_splits = [c for c in df.columns if c != 'target' and len(df[c].value_counts()) > 1]\n # specified nattrs number\n if nattrs is not None:\n np.random.shuffle(possible_splits)\n possible_splits = possible_splits[:nattrs]\n # Terminate early if none are possivle\n if not possible_splits:\n return None\n \n # Get the base purity measure and the purity function\n if criterion in ['infogain', 'infogain_ratio']:\n purity_fun = entropy\n elif criterion in ['mean_err_rate']: \n purity_fun = mean_err_rate\n elif criterion in ['gini']:\n purity_fun = gini\n else:\n raise Exception(\"Unknown criterion: \" + criterion)\n base_purity = purity_fun(target_value_counts)\n\n best_purity_gain = -1\n best_split = None\n\n # Random Forest support\n # Randomize the split by restricting the number of attributes\n \n for attr in possible_splits:\n if np.issubdtype(df[attr].dtype, np.number):\n # Handling of numerical attributes will be defined later, in a manner \n # similar to categorical ones\n split_sel_fun = get_numrical_split_and_purity\n else:\n split_sel_fun = get_categorical_split_and_purity\n \n split, purity_gain = split_sel_fun(\n df, base_purity, purity_fun, attr,\n normalize_by_split_entropy=criterion.endswith('ratio'))\n \n if purity_gain > best_purity_gain:\n best_purity_gain = purity_gain\n best_split = split\n return best_split\n\nclass Tree:\n def __init__(self, df, **kwargs):\n super().__init__()\n # Assert that threre are no missing values,\n # TODO: remove this for bonus problem #XXX\n assert not df.isnull().values.any()\n \n # We need to let subrees know about all targets to properly color nodes\n if 'all_targets' not in kwargs:\n kwargs['all_targets'] = sorted(df['target'].unique())\n # Save keyword arguments to build subtrees\n kwargs_orig = dict(kwargs)\n \n # Get kwargs we know about, remaning ones are for splitting\n self.all_targets = kwargs.pop('all_targets')\n \n # Save debug info for visualization\n self.counts = df['target'].value_counts()\n self.info = {\n 'num_samples': len(df),\n 'entropy': entropy(self.counts),\n 'gini': gini(self.counts)\n }\n \n self.split = get_split(df, **kwargs)\n if self.split:\n #print('!!S', self.split)\n self.split.build_subtrees(df, kwargs_orig)\n\n def upper_confidence_interval(self, f, N, z=0.5):\n # http://chrome.ws.dei.polimi.it/images/6/62/IRDM2015-04-DecisionTreesPruning.pdf?fbclid=IwAR2j1xK_WTsF77rUucQW1q-y09s3EWHgfX52H6_3hXO_MTyGVSs5Fsoi1Sc\n return (f + ((z ** 2) / (2 * N)) + z * ((f / N - (f ** 2) / N + z ** 2 / (4 * (N ** 2))) ** 0.5)) / (1 + (z ** 2) / N)\n \n\n def confidence_interval_pruning(self):\n if self.split:\n for c in self.split.iter_subtrees():\n c.confidence_interval_pruning()\n\n n = self.info[\"num_samples\"]\n current_error = self.counts / np.sum(self.counts)\n current_error = list(sorted(list(current_error), reverse=True))\n current_error = self.upper_confidence_interval(np.sum(current_error[1:]), n)\n # print(current_error)\n self.info[\"confidence_error\"] = current_error\n\n if self.split:\n children_error = 0\n for c in self.split.iter_subtrees():\n children_error += (c.info['num_samples']/n) * c.info[\"confidence_error\"]\n\n if children_error > current_error:\n self.split = None\n self.info[\"splitted\"] = True\n\n def get_target_distribution(self, sample):\n # TODO: descend into subtrees and return the leaf target 
distribution\n if self.split is not None:\n subtree = self.split(sample)\n if subtree is not None:\n return subtree.get_target_distribution(sample)\n else:\n return self.counts / self.info['num_samples']\n else:\n return self.counts / self.info['num_samples']\n \n def classify(self, sample):\n # TODO: classify the sample by descending into the appropriate subtrees.\n if self.split is not None:\n subtree = self.split(sample)\n if subtree is not None:\n return subtree.classify(sample)\n else:\n # idmax() == This method is the DataFrame version of ndarray.argmax\n return self.counts.idxmax()\n else:\n return self.counts.idxmax()\n \n def draw(self, print_info=True):\n dot = graphviz.Digraph()\n self.add_to_graphviz(dot, print_info)\n return dot\n\n def add_to_graphviz(self, dot, print_info):\n freqs = self.counts / self.counts.sum()\n freqs = dict(freqs)\n colors = []\n freqs_info = []\n for i, c in enumerate(self.all_targets):\n freq = freqs.get(c, 0.0)\n if freq > 0:\n colors.append(f\"{i%9 + 1};{freq}\")\n freqs_info.append(f'{c}:{freq:.2f}')\n colors = ':'.join(colors)\n labels = [' '.join(freqs_info)]\n if print_info:\n for k,v in self.info.items():\n labels.append(f'{k} = {v}')\n if self.split:\n labels.append(f'split by: {self.split.attr}')\n dot.node(f'{id(self)}',\n label='\\n'.join(labels), \n shape='box',\n style='striped',\n fillcolor=colors,\n colorscheme='set19')\n if self.split:\n self.split.add_to_graphviz(dot, self, print_info)\n\nclass NumericalSplit(AbstractSplit):\n def __init__(self, attr, th):\n super(NumericalSplit, self).__init__(attr)\n self.th = th\n \n def build_subtrees(self, df, subtree_kwargs):\n self.subtrees = (\n Tree(df[df[self.attr] <= self.th], **subtree_kwargs),\n Tree(df[df[self.attr] > self.th], **subtree_kwargs))\n\n def __call__(self, x):\n if x[self.attr] <= self.th:\n return self.subtrees[0]\n else:\n return self.subtrees[1]\n \n def __str__(self):\n return f\"NumericalSplit: {self.attr} <= {self.th}\"\n\n def iter_subtrees(self):\n return self.subtrees\n \n def add_to_graphviz(self, dot, parent, print_info):\n self.subtrees[0].add_to_graphviz(dot, print_info)\n dot.edge(f'{id(parent)}', f'{id(self.subtrees[0])}',\n label=f'<= {self.th:.2f}')\n self.subtrees[1].add_to_graphviz(dot, print_info)\n dot.edge(f'{id(parent)}', f'{id(self.subtrees[1])}',\n label=f'> {self.th:.2f}')\n\n\ndef get_numrical_split_and_purity(df, parent_purity, purity_fun, attr,\n normalize_by_split_entropy=False):\n \"\"\"Find best split thereshold and compute the average purity after a split.\n Args:\n df: a dataframe\n parent_purity: purity of the parent node\n purity_fun: function to compute the purity\n attr: attribute over whihc to split the dataframe\n normalize_by_split_entropy: if True, divide the purity gain by the split\n entropy (to compute https://en.wikipedia.org/wiki/Information_gain_ratio)\n \n Returns:\n pair of (split, purity_gain)\n \"\"\"\n attr_df = df[[attr, 'target']].sort_values(attr)\n targets = attr_df['target']\n values = attr_df[attr]\n # Start with a split that puts all the samples into the right subtree\n right_counts = targets.value_counts()\n left_counts = right_counts * 0\n\n best_split = None\n best_purity_gain = -1\n N = len(attr_df)\n for row_i in range(N - 1):\n # Update the counts of targets in the left and right subtree and compute\n # the purity of the slipt for all possible thresholds!\n # Return the best split found.\n\n # Remember that the attribute may have duplicate values and all samples\n # with the same attribute value must end in 
the same subtree!\n row_target = targets.iloc[row_i]\n left_counts[row_target] -= 1\n right_counts[row_target] -= 1\n\n if attr_df.iloc[row_i][0] != attr_df.iloc[row_i + 1][0]:\n children_purity = (row_i + 1) * purity_fun(left_counts) + (N - row_i - 1) * purity_fun(right_counts)\n children_purity /= N\n purity = parent_purity - children_purity\n\n if purity > best_purity_gain:\n best_purity_gain = purity\n # our threshold\n attr_mean = (attr_df.iloc[row_i][0] + attr_df.iloc[row_i + 1][0]) / 2\n best_split = NumericalSplit(attr, attr_mean)\n\n \n if normalize_by_split_entropy:\n best_purity_gain /= entropy(targets.value_counts())\n return best_split, best_purity_gain\n\nclass RandomForest:\n def __init__(self, train, test, trees_num, criterion, nattrs):\n self.train = train\n self.test = test\n self.trees_num = trees_num\n self.criterion = criterion\n self.nattrs = nattrs\n self.trees = []\n self.errors = [] # array of tuples of 3 args\n self.make_forest()\n\n def make_forest(self):\n for t in range(self.trees_num):\n tree, oob = self.make_tree()\n self.trees.append(tree)\n print(\"Tree \",t,\" planted\")\n\n # error counts\n tree_error = self.tree_error(tree, self.test)\n oob_error = self.tree_error(tree, oob)\n forest_error = self.forest_error(self.test)\n self.errors.append([tree_error, oob_error, forest_error])\n\n def make_tree(self):\n # with bagging\n train_df = resample(self.train, n_samples=len(self.train))\n # print(\"PPPOF1.5\")\n # oob = [i for i in train_df if i not in self.train]\n oob = pd.DataFrame(self.train.loc[i] for i in self.train.index if i not in train_df.index)\n tree = Tree(train_df, criterion=self.criterion, nattrs=self.nattrs)\n # print(oob)\n return tree, oob\n\n def tree_error(self, tree, dataset):\n targets = [tree.classify(dataset.iloc[i]) for i in range(len(dataset))]\n # print(targets)\n # print(dataset['target'])\n classification = np.array(np.array(dataset['target']) == targets)\n return (len(classification) - np.count_nonzero(classification)) / len(dataset)\n\n def forest_error(self, dataset):\n forest_targets = np.array([[t.classify(dataset.iloc[i]) for i in range(len(dataset))] for t in self.trees])\n\n after_majority_voting = []\n for tests in range(len(dataset)):\n # print(forest_targets)\n trees_guess = forest_targets[:, tests]\n best_guess = sstats.mode(trees_guess)[0][0]\n # print(trees_guess)\n # print(best_guess)\n # assert 0 == 1\n after_majority_voting.append(best_guess)\n\n classification = np.array(dataset['target'] == np.array(after_majority_voting))\n return (len(classification) - np.count_nonzero(classification)) / len(dataset)\n\n def mean_tree_errors(self):\n return np.array(self.errors).mean(axis=0)\n\n def mean_agreement(self):\n forest_targets = np.array([[t.classify(self.test.iloc[i]) for i in range(len(self.test))] for t in self.trees])\n\n res = 0\n for i in range(len(self.trees)):\n for j in range(i+1, len(self.trees)):\n simmilar = [forest_targets[i] == forest_targets[j]]\n res += np.count_nonzero(simmilar) / len(self.test)\n \n if res == 0:\n return 0\n return res / (len(self.trees) * (len(self.trees)-1))/2\n \n\n def print_forest(self):\n for i in range(self.trees_num):\n print(\"Tree {}: RF Err rate {}\\t Err rate {}\\t OOB err rate {}\".format(i,round(self.errors[i][2],3),round(self.errors[i][0],3), 
round(self.errors[i][1],3)))\n","repo_name":"krzysztofnyczka/TFT_predictor","sub_path":"Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":15403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34657418886","text":"import pdb\n\nclass BT():\n def __init__(self):\n self.root = None\n self.size = 0\n\n def add(self, key, node):\n # pdb.set_trace()\n if node is None:\n node = self.root\n\n if self.root is None:\n self.root = TreeNode(key)\n return\n\n if key <= node.value:\n if node.left is None:\n node.left = TreeNode(key)\n return\n else:\n self.add(key, node.left)\n node.left_size += 1\n\n else:\n if node.right is None:\n node.right = TreeNode(key)\n return\n else:\n self.add(key, node.right)\n\n\n def __repr__(self):\n return \"The root of the tree is: \" % self.root\n\nclass TreeNode():\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n self._track = None\n self.left_size = 0\n\n def insertLeftChild(self, value):\n self.left = value\n\n def insertRightChild(self, value):\n self.right = value\n\n #this method needs to be called on the root initially\n def getRankOfNode(self, v): #v needs to be value we are looking for\n # pdb.set_trace()\n if self.value == v:\n return self.left_size\n elif v < self.value:\n if self.left is None:\n return -1\n else:\n self.left.getRankOfNode(v)\n else:\n if self.right is None:\n return -1\n else:\n return self.left_size + 1 + self.right.getRankOfNode(v)\n\n\n @property\n def track(self):\n return self._track\n\n @track.setter\n def track(self, value):\n self._track = value #tracks the numbers of values less than this number\n\n def __repr__(self):\n return \"Value is: %s\" % self.value\n\nclass Stream():\n def __init__(self):\n self.values = [5,1,4,4,5,9,7,13,3]\n\n def __iter__(self):\n for value in self.values:\n yield value\n\n\nif __name__ == \"__main__\":\n bt = BT()\n stream = Stream()\n for number in stream:\n bt.add(number, None)\n result = bt.root.getRankOfNode(9)\n print(result)\n","repo_name":"redixhumayun/ctci","sub_path":"Sorting_Searching/streamRank.py","file_name":"streamRank.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74487974025","text":"import numpy as np\n\n# Stable O(n^2) sorting algorithm\n# sorts through sorting sub arrays\n# swaps values through a second pointer\n\n\nclass InsertionSort():\n\n \"\"\"\n class for iterative and recursive insertion sort\n \"\"\"\n\n def iterative(self, arr):\n\n n = len(arr)\n for i in range(1, n):\n\n j = i - 1 # pointer\n\n # sort sub array until sorted\n while j >= 0 and arr[j] > arr[j + 1]:\n # swap\n tmp = arr[j + 1]\n arr[j + 1] = arr[j]\n arr[j] = tmp\n\n j -= 1 # compare next element before\n \n return arr\n\n def recursive(self, arr, i=1):\n\n # base case of sorted all sub arrays\n if i == len(arr):\n return arr\n\n j = i - 1\n\n # sort sub array until sorted\n while j >= 0 and arr[j + 1] < arr[j]:\n # swap \n tmp = arr[j + 1]\n arr[j + 1] = arr[j]\n arr[j] = tmp\n\n j -= 1 # compare next element before\n\n return self.recursive(arr, i + 1) # go to next subarray\n\n def __call__(self, arr, recursive=False):\n if recursive:\n return self.recursive(arr)\n return self.iterative(arr)\n\nif __name__ == \"__main__\":\n n = 15\n arr = list(np.random.randint(-10, 10, (n, )))\n print(arr)\n sort = InsertionSort()\n print(sort(arr))\n print(sort(arr, 
recursive=True))\n \n \n\n\n","repo_name":"Andrew011002/Data-Structures","sub_path":"Sorting/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33595172856","text":"#!/usr/bin/env python\nimport random\nimport xlsxwriter\n\nsong_list = []\ntry:\n with open(\"./../songs.txt\",\"r\", encoding='utf-8') as f:\n song_list = f.readlines()\nexcept:\n print(\"Could not read songs.txt file.\")\n\n# Card style and number of cards to generate\ncolumns = 3\nrows = 2\ncards = 100\n\nmin_rand_num = 1\nmax_rand_num = len(song_list)\n\n# Spreadsheed table formats\nworkbook = xlsxwriter.Workbook('./../bingoCards.xlsx')\nworksheet = workbook.add_worksheet()\ncell_format = workbook.add_format()\ncell_format.set_align('center')\ncell_format.set_border(1)\ncell_format.set_border_color('black')\nmerge_format = workbook.add_format({\n 'align': 'center',\n 'bold': True,\n 'border': 1,\n})\n\ndef add_card_to_spreadsheet(bingo_card, row):\n worksheet.merge_range(row, 0, row, 2, \"Music Bingo\", merge_format)\n row += 1\n for col, data in enumerate(bingo_card):\n worksheet.write_column(row, col, data, cell_format)\n row += 3\n return row\n\ndef generate_cards():\n rand_range = range(min_rand_num, max_rand_num)\n row = 0\n\n try:\n for h in range(cards):\n card_as_numbers = []\n try:\n card_as_numbers = random.sample(rand_range, columns * rows)\n except:\n print(\"There are not enough songs in the list to generate bingo cards.\")\n bingo_card = []\n for i in range(columns):\n bingo_row = []\n for j in range(rows):\n number = card_as_numbers[i * rows + j]\n bingo_row.append(song_list[number])\n bingo_card.append(bingo_row)\n row = add_card_to_spreadsheet(bingo_card, row)\n except:\n print(\"Bingo cards could not been generated.\")\n\n return row > 0 # if greater than 0, cards were generated\n\ndef format_and_save_file():\n worksheet.autofit()\n workbook.close()\n print(\"Music bingo cards generated succesfully.\")\n\ndef main():\n if (generate_cards()):\n format_and_save_file()\n \nmain()","repo_name":"benattxurruka/music-bingo-card-generator","sub_path":"source/CardGenerator.py","file_name":"CardGenerator.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70145797066","text":"# You are given a tree (a simple connected graph with no cycles). You have to remove as many edges from the tree as possible to obtain a forest with the condition that : Each connected component of the forest should contain an even number of vertices.\n\n# To accomplish this, you will remove some edges from the tree. Find out the number of removed edges.\n\n# Input Format\n# The first line of input contains two integers N and M. N is the number of vertices and M is the number of edges.\n# The next M lines contain two integers ui and vi which specifies an edge of the tree. 
(1-based index)\n\n# Output Format\n# Print the answer, a single integer.\n\nN, M = map(int, raw_input().split())\nedges = []\nfor x in xrange(M):\n u, v = map(int, raw_input().split())\n edges.append([u, v])\n\n\ninfo = []\n\n\ndef findChildren(n):\n children = []\n for x in xrange(M):\n if edges[x][1] == n:\n children.append(edges[x][0])\n childN = findChildren(edges[x][0])\n for child in childN:\n children.append(child)\n return children\ntree = []\n\n\ndef generateTree():\n global tree\n for x in xrange(N):\n tree.append([x + 1])\n for x in xrange(N):\n tree[x].append(findChildren(x + 1))\n return tree\n\ngenerateTree()\n\ncount = 0\nfor x in xrange(N):\n if len(tree[x][1]) % 2 == 1:\n count += 1\nprint (count - 1)\n","repo_name":"defaults/competitive-programming","sub_path":"hackerrank/algorithms/Graph Theory/even_tree.py","file_name":"even_tree.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10773118616","text":"from itertools import product\nfrom math import ceil\n\nimport torch\n\nimport data.config\n\n\nclass Anchor:\n def __init__(self, config, image_size=None):\n self.min_sizes = config['min_sizes']\n self.steps = config['steps']\n self.clip = config['clip']\n\n self.image_size = image_size # H W\n\n self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps]\n\n def get_anchors(self):\n anchors = []\n for k, feature_map in enumerate(self.feature_maps):\n min_sizes = self.min_sizes[k]\n for i, j in product(range(0, feature_map[0]), range(0, feature_map[1])):\n for min_size in min_sizes:\n s_kx = min_size / self.image_size[1]\n s_ky = min_size / self.image_size[0]\n dense_cx = [x / feature_map[1] for x in [j + 0.5]]\n dense_cy = [y / feature_map[0] for y in [i + 0.5]]\n for cy, cx in product(dense_cy, dense_cx):\n anchors += [cx, cy, s_kx, s_ky]\n output = torch.Tensor(anchors).reshape(-1, 4)\n if self.clip:\n output.clamp_(max=1, min=0)\n return output\n\n\nif __name__ == '__main__':\n anchors = Anchor(data.config.cfg_mobilenet, (640, 640)).get_anchors()\n print(anchors.shape)\n","repo_name":"qiaofengsheng/Pytorch-RetinaFace","sub_path":"tools/anchor.py","file_name":"anchor.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25125245178","text":"#RUS - найдите произведение цифр\n#ENG - find multiply of digits\n\ndef mult(x):\n s = 1\n while x > 0:\n s *= (x % 10)\n x //= 10\n return s\n\na = int(input())\nb = int(input())\n\nfor i in range(a, b + 1):\n if mult(i) > 100:\n print(i, end=\" \")\n","repo_name":"artemiy-228/Python_Ege","sub_path":"Training task_25.py","file_name":"Training task_25.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25955941210","text":"from app.infra.web.service import SentimentCommentEstimator\nfrom app.infra.web.configs.database import get_db_connection\nfrom app.core.use_case import CommentPublicationInteractor\nfrom app.infra.web.repository import (\n PostRepository,\n UserRepository,\n CommentRepository\n)\n\nimport time\n\nCOMMENTS_NUM = 100\nCONTENT = \"Throughout the rest of its running time, “Black Adam” leans into the inevitability of Adam’s evolution toward good-guy status, condensing the transformation of the title character in the first two “Terminator” films (there are even 
comic bits where people try to teach Adam sarcasm and the Geneva Conventions). \\\"Black Adam\\\" then stirs in dollops of a macho sentimentality that used to be common in old Hollywood dramas about loners who needed to get involved in a cause in order to reset their moral compasses or recognize their own worth. But the sharp edge that the film brings to the early parts of its story never dulls.\"\n\ndb = next(get_db_connection())\npost_repository = PostRepository(db)\nuser_repository = UserRepository(db)\ncomment_repository = CommentRepository(db)\nsentiment_comment_estimator = SentimentCommentEstimator()\ncomment_publication_interactor = CommentPublicationInteractor(\n comment_repository=comment_repository,\n post_repository=post_repository,\n user_repository=user_repository,\n sentiment_comment_estimator=sentiment_comment_estimator\n)\nrequest_model = {\n \"username\": \"denis\",\n \"post_id\": 1,\n \"content\": CONTENT\n}\nwhile True:\n begin_t = time.time()\n response_model = comment_publication_interactor.publish_comment(**request_model)\n end_t = time.time()\n delta_t = end_t - begin_t\n\n print(f' time: {delta_t * 1000:.0f} ms')","repo_name":"Derzhavin/stream_data_analysis","sub_path":"src/direct_ampq_comments_publisher.py","file_name":"direct_ampq_comments_publisher.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25656288747","text":"class Solution:\n def maximum69Number (self, num: int) -> int:\n Snum = list(str(num))\n\n for i in range(len(Snum)):\n if Snum[i] == \"6\":\n Snum[i] = \"9\"\n break\n \n return int(''.join(Snum))\n","repo_name":"NaolB02/A2SV","sub_path":"1323. Maximum 69 Number.py","file_name":"1323. Maximum 69 Number.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17543052295","text":"if __name__=='__main__':\n\n# for loops\n for i in range(5, 10):\n print(i)\n #Simplified Bizz, Buzz game\n for i in range(1, 10):\n if i == 5:\n print(\"Buzz\")\n print(\"bizz\")\n print(\"\\n\")\n #Stop when you find a 6\n #break statement stops the loop\n for i in range(1, 10):\n if i == 6:\n print(\"Found\")\n break\n print(\"Not Yet\")\n print(\"It was found at {}\".format(i)) #iterate over letters\n from string import ascii_lowercase\n for ch in ascii_lowercase:\n print(ch)\n\n# While loops\n\n max=5\n counter=0\n total=0\n while counter <= max :\n total+=9.99\n counter+=1\n print(\"The final amount is: {0:5.2f}\".format(total))\n\n #While true sample\n text=\"\"\n while 1:\n print (\"Enter name\")\n uname=input()\n if(uname == \"joe\"):\n break\n print(\"Finished\")\n\n\n","repo_name":"vrednyj/Python_Programming_Labs","sub_path":"Selection Structures & Loops/Loops.py","file_name":"Loops.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3966245692","text":"\nimport cv2\nimport os\nimport numpy as np\nimport keras\nimport matplotlib.pyplot as plt\nimport download\nfrom random import shuffle\nfrom keras.applications import VGG16\nfrom keras import backend as K\nfrom keras.models import Model, Sequential\nfrom keras.layers import Input\nfrom keras.layers import LSTM\nfrom keras.layers import Dense, Activation\nimport sys\nimport h5py\n\n\n\ndef print_progress(count, max_count):\n # Percentage completion.\n pct_complete = count / max_count\n\n # Status-message. 
Note the \\r which means the line should\n # overwrite itself.\n msg = \"\\r- Progress: {0:.1%}\".format(pct_complete)\n\n # Print it.\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n\n\ndir_fight=\"/kaggle/input/violence-final/fight\"\ndir_not_fight=\"/kaggle/input/violence-final/not_fight\"\nlist_fight=os.listdir(dir_fight)\nlist_no_fight=os.listdir(dir_not_fight)\n\nimport random\nfight_final=random.sample(list_fight, 800)\n\n\n\nlen(fight_final)\n\n\n\nno_fight_final=random.sample(list_no_fight,800)\n\n\n\nfight_labels = []\nno_fight_labels = []\nfor i in range (800):\n fight_labels.append([1,0])\n no_fight_labels.append([0,1])\n\n\n\nfinal = fight_final + no_fight_final\n\n\n\nlabels = fight_labels + no_fight_labels\n\n\n\nc = list(zip(final,labels))\nshuffle(c)\n \nnames, labels = zip(*c)\n\n\n\nlabels[0]\n\n\n\n# Frame size \nimg_size = 224\n\nimg_size_touple = (img_size, img_size)\n\n# Number of channels (RGB)\nnum_channels = 3\n\n# Flat frame size\nimg_size_flat = img_size * img_size * num_channels\n\n# Number of classes for classification (Violence-No Violence)\nnum_classes = 2\n\n# Number of files to train\n_num_files_train = 1\n\n# Number of frames per video\n_images_per_file = 20\n\n# Number of frames per training set\n_num_images_train = _num_files_train * _images_per_file\n\n\n\ndef get_frames(current_dir, file_name):\n \n in_file = os.path.join(current_dir, file_name)\n \n images = []\n \n vidcap = cv2.VideoCapture(in_file)\n \n success,image = vidcap.read()\n \n count = 0\n\n while count<_images_per_file:\n \n RGB_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n \n res = cv2.resize(RGB_img, dsize=(img_size, img_size),\n interpolation=cv2.INTER_CUBIC)\n \n images.append(res)\n \n success,image = vidcap.read()\n \n count += 1\n \n resul = np.array(images)\n \n resul = (resul / 255.).astype(np.float16)\n \n return resul\n\n\nimage_model = VGG16(include_top=True, weights='imagenet')\n\n\ninput_shape = image_model.layers[0].output_shape[1:3]\nprint(input_shape)\n\n\n\n# We will use the output of the layer prior to the final\n# classification-layer which is named fc2. 
This is a fully-connected (or dense) layer.\ntransfer_layer = image_model.get_layer('fc2')\n\nimage_model_transfer = Model(inputs=image_model.input,\n outputs=transfer_layer.output)\n\ntransfer_values_size = K.int_shape(transfer_layer.output)[1]\n\n\nprint(\"The input of the VGG16 net have dimensions:\",K.int_shape(image_model.input)[1:3])\n\nprint(\"The output of the selecter layer of VGG16 net have dimensions: \", transfer_values_size)\n\n\n\ndef get_transfer_values(current_dir, file_name):\n \n # Pre-allocate input-batch-array for images.\n shape = (_images_per_file,) + img_size_touple + (3,)\n \n image_batch = np.zeros(shape=shape, dtype=np.float16)\n \n image_batch = get_frames(current_dir, file_name)\n \n # Pre-allocate output-array for transfer-values.\n # Note that we use 16-bit floating-points to save memory.\n shape = (_images_per_file, transfer_values_size)\n transfer_values = np.zeros(shape=shape, dtype=np.float16)\n\n transfer_values = image_model_transfer.predict(image_batch)\n \n return transfer_values\n\n\n\ndef proces_transfer(vid_names, labels):\n \n count = 0\n \n tam = len(vid_names)\n \n # Pre-allocate input-batch-array for images.\n shape = (_images_per_file,) + img_size_touple + (3,)\n \n while count confidence_threshold:\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x, y, x1, y1) = box.astype(\"int\")\n width = x1 - x\n height = y1 - y\n # Проверка, что размер лица не меньше min_w и min_h\n if width >= min_w and height >= min_h:\n faces.append({'box': [x, y, width, height]})\n\n return faces\n\n\nif __name__ == '__main__':\n frame = cv2.imread('./Faces.Best/ID.11.jpg')\n print(detect_face(frame, confidence_threshold=0.55, model_path=model_path, proto_path=proto_path))\n","repo_name":"dnp34/temp_pyqt5","sub_path":"ssd_face_detection.py","file_name":"ssd_face_detection.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25242588268","text":"\"\"\"\n输入一个整型数组,数组中的一个或���续多个整数组成一个子数组。求所有子数组的和的最大值。\n\n要求时间复杂度为O(n)。\n\n \n\n示例1:\n\n输入: nums = [-2,1,-3,4,-1,2,1,-5,4]\n输出: 6\n解释: 连续子数组 [4,-1,2,1] 的和最大,为 6。\n\"\"\"\n\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n n = len(nums)\n dp = [0] * n\n dp[0] = nums[0]\n for i in range(1, n):\n dp[i] = max(dp[i - 1] + nums[i], nums[i])\n return max(dp)\n","repo_name":"LeungLoh/algorithm","sub_path":"剑指offer/剑指 Offer 42. 连续子数组的最大和.py","file_name":"剑指 Offer 42. 
连续子数组的最大和.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72086868104","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import PhotoImage\nfrom PIL import ImageTk\nimport dangnhap\nimport socket\nimport sinhvien\nimport diemdanh\nimport thongke\nfrom backend.dl_giangvien import tengv_email,makhoa_email,sdt_email,magv_email,update_sdt\nfrom backend.dl_khoa import tenkhoa\nfrom backend.dl_tkb import kt_lichgiang_gv,gv_dd\nimport doimatkhau\nimport taikhoan_thongbao\nimport datetime\nimport threading\nimport diemdanh_bu\n\ndef main():\n\n def loadding(a):\n if a == 1:# đang load dữ liệu\n lb_loadding.place(x=904,y=1)\n btn_capnhatsdt[\"state\"] = \"disabled\"\n btndangxuat[\"state\"] = \"disabled\"\n btndangxuat1[\"state\"] = \"disabled\"\n btndoimatkhau[\"state\"] = \"disabled\"\n else:\n lb_loadding.place_forget()\n btn_capnhatsdt[\"state\"] = \"normal\"\n btndangxuat[\"state\"] = \"normal\"\n btndangxuat1[\"state\"] = \"normal\"\n btndoimatkhau[\"state\"] = \"normal\"\n\n\n\n def luong(ham):\n threading.Thread(target=ham).start()\n \n def loaddl():\n makhoa.set(makhoa_email(email))\n tengv.set(tengv_email(email))\n magv.set(magv_email(email))\n tenkh.set(tenkhoa(makhoa.get()))\n sdt.set(sdt_email(email))\n\n lbgv.config(text=tengv.get())\n lb_gv.config(text=tengv.get())\n lbtk.config(text=tenkh.get())\n\n lichgiang=(kt_lichgiang_gv(magv.get(),ngay))\n\n gvdd=gv_dd(magv.get(),ngay)\n\n if lichgiang == []:\n data_lichgiang.set(\"Hôm nay, không có tiết giảng\")\n else:\n data_lichgiang.set(\"Hôm nay, có lịch giảng !\")\n btnthongbao=Button(bg,image=ing_btnthongbao,bd=0,highlightthickness=0,command=lambda: chuyentrang_lichgiang(lichgiang))\n btnthongbao.place(x=920,y=365)\n lbstb=Label(bg,text=len(lichgiang),fg=\"red\",font=(\"Arial\",10),bg=\"white\")\n lbstb.place(x=952,y=360)\n\n if gvdd == []:\n data_dd.set(\"Đã điểm danh tất cả\")\n else:\n data_dd.set(\"Chưa điểm danh\")\n btnthongbaodd=Button(bg,image=ing_btnthongbao,bd=0,highlightthickness=0,command= thongbaodd)\n btnthongbaodd.place(x=920,y=425)\n lbstb1=Label(bg,text=len(gvdd),fg=\"red\",font=(\"Arial\",10),bg=\"white\")\n lbstb1.place(x=952,y=420)\n print(len(gvdd))\n loadding(0)\n\n\n\n def dinh_dang_ngay(ngay):\n ngay=str(ngay).replace(\"/\",\" \")\n ngay=str(ngay).replace(\"-\",\" \")\n d=ngay.split()\n if len(d[0])==1:\n d[0]=\"0\"+d[0]\n if len(d[1])==1:\n d[1]=\"0\"+d[1]\n if len(d[2]) ==4 :\n ngay=d[0]+\"/\"+d[1]+\"/\"+d[2]\n else:\n ngay=d[1]+\"/\"+d[0]+\"/20\"+d[2]\n return ngay\n\n def capnhat_sdt():\n if len(sdt.get()) !=10 or sdt.get().isnumeric()== False:\n messagebox.showwarning(\"thông báo\",\"Số điện thoại không đúng\")\n elif update_sdt(magv.get(),sdt.get()):\n messagebox.showinfo(\"thông báo\",\"Đã cập nhật số điện thoại\")\n else:\n messagebox.showwarning(\"Lỗi \",\"Cập nhật không thành công\")\n def thongbaodd():\n win.destroy()\n diemdanh_bu.main(1)\n\n def chuyentrang_lichgiang(lichgiang):\n win.destroy()\n taikhoan_thongbao.main(lichgiang, tengv.get())\n\n def btndoimatkhau():\n win.destroy()\n doimatkhau.main(1)\n def menuthongke():\n win.destroy()\n thongke.main()\n\n def menudiemdanh():\n win.destroy()\n diemdanh.main()\n\n def menuthemsv():\n win.destroy()\n sinhvien.main()\n\n def dangxuat():\n if messagebox.askyesno(\"Thông báo\",\"Bạn có thực sự muốn đăng xuất ?\"):\n ten_thiet_bi = socket.gethostname()\n file=open(ten_thiet_bi+\".txt\",\"w\")\n file.write(\"\")\n 
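            # Overwrite the per-device credentials file with an empty string so the saved login is cleared before returning to the sign-in screen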
file.close()\n win.destroy()\n dangnhap.main()\n else: return\n\n win=Tk()\n win.geometry(\"1000x600+300+120\")\n win.resizable(False,False)\n win.iconbitmap(r\"img/iconphanmem.ico\")\n win.config(bg=\"white\")\n win.title(\"Thông tin người dùng\")\n img_bg=ImageTk.PhotoImage(file=\"img/bgtaikhoan.png\")\n img_bg1=ImageTk.PhotoImage(file=\"img/bgtaikhoan1.png\")\n ing_menuthem=ImageTk.PhotoImage(file=\"img/menuthemdl1.png\")\n ing_menudiemdanh=ImageTk.PhotoImage(file=\"img/menudiemdanh.png\")\n ing_menutaikhoan=ImageTk.PhotoImage(file=\"img/menutaikhoan1.png\")\n ing_menuthongke=ImageTk.PhotoImage(file=\"img/menuthongke.png\")\n ing_btndangxuat=ImageTk.PhotoImage(file=\"img/btndangxuat.png\")\n ing_btndangxuat1=ImageTk.PhotoImage(file=\"img/btndangxuat1.png\")\n ing_btndoimatkhau=ImageTk.PhotoImage(file=\"img/btndoimatkhau.png\")\n ing_btnthongbao=ImageTk.PhotoImage(file=\"img/btnthongbao.png\")\n ing_btnthietlap=ImageTk.PhotoImage(file=\"img/thietlap.png\")\n ing_btnquaylai=ImageTk.PhotoImage(file=\"img/btnquaylai.png\")\n ing_capnhatsdt=ImageTk.PhotoImage(file=\"img/capnhatsdt.png\")\n#------------------------------------------------------------------------------\n ten_thiet_bi = socket.gethostname()\n d=[]\n with open(ten_thiet_bi+\".txt\",\"r\") as file:\n d=file.read().split()\n email=d[0]\n makhoa=StringVar()\n tengv=StringVar()\n magv=StringVar()\n tenkh=StringVar()\n sdt=StringVar()\n data_lichgiang=StringVar()\n data_dd=StringVar()\n\n\n time = datetime.datetime.now()\n now = time.strftime(\"%x\")\n ngay=dinh_dang_ngay(now)\n \n#-------------------------------------------------------------------------------\n bg=Canvas(win,width=1000,height=600,bg=\"white\")\n bg.pack(side=\"left\",padx=0)\n anhnen=bg.create_image(500,300,image=img_bg)\n\n menuthem=Button(bg,image=ing_menuthem,bd=0,highlightthickness=0,activebackground='#857EBD',command=menuthemsv)\n menuthem.place(x=46,y=129)\n\n menudiemdanh=Button(bg,image=ing_menudiemdanh,bd=0,highlightthickness=0,activebackground='#857EBD',command=menudiemdanh)\n menudiemdanh.place(x=46,y=248)\n\n menuthongke=Button(bg,image=ing_menuthongke,bd=0,highlightthickness=0,activebackground='#857EBD',command=menuthongke)\n menuthongke.place(x=46,y=366)\n\n menutaikhoan=Button(bg,image=ing_menutaikhoan,bd=0,highlightthickness=0,activebackground='#857EBD')\n menutaikhoan.place(x=46,y=484)\n\n btndangxuat=Button(bg,image=ing_btndangxuat,bd=0,highlightthickness=0,command=dangxuat)\n btndangxuat.place(x=248,y=44)\n\n \n lbgv=Label(bg,font=(\"Baloo Tamma 2 Medium\",12),fg=\"#A672BB\",bg=\"white\")\n lbgv.place(x=45,y=38)\n\n lb_gv=Label(bg,font=(\"Baloo Tamma 2 Medium\",12),fg=\"black\",bg=\"white\")\n lb_gv.place(x=570,y=201)\n \n lbtk=Label(bg,font=(\"Baloo Tamma 2 Medium\",12),fg=\"black\",bg=\"white\")\n lbtk.place(x=570,y=141)\n\n lbe=Label(bg,text=email,font=(\"Baloo Tamma 2 Medium\",12),fg=\"black\",bg=\"white\")\n lbe.place(x=570,y=261)\n\n lbsdt=Entry(bg,textvariable=sdt,font=(\"Baloo Tamma 2 Medium\",12),fg=\"black\",bg=\"white\",bd=0,highlightthickness=0,)\n lbsdt.place(x=570,y=321)\n\n btn_capnhatsdt=Button(bg,image=ing_capnhatsdt,bd=0,highlightthickness=0,command=capnhat_sdt)\n btn_capnhatsdt.place(x=925,y=310)\n\n \n\n btndoimatkhau=Button(bg,image=ing_btndoimatkhau,bd=0,highlightthickness=0,command=btndoimatkhau)\n btndoimatkhau.place(x=672,y=539)\n\n btndangxuat1=Button(bg,image=ing_btndangxuat1,bd=0,highlightthickness=0,command=dangxuat)\n btndangxuat1.place(x=836,y=537)\n\n lbcg=Label(bg,textvariable=data_lichgiang,font=(\"Baloo Tamma 2 
Medium\",12),fg=\"black\",bg=\"white\")\n lbcg.place(x=570,y=381)\n\n lbdd=Label(bg,textvariable=data_dd,font=(\"Baloo Tamma 2 Medium\",12),fg=\"black\",bg=\"white\")\n lbdd.place(x=570,y=441)\n\n # btnthietlap=Button(bg,image=ing_btnthietlap,bd=0,highlightthickness=0,command=thietlap)\n # btnthietlap.place(x=949,y=2)\n lb_loadding=Label(bg,text=\" Đang tải . . . \", font=(\"Baloo Tamma 2 Medium\",11),bg=\"#FFF4FF\",fg=\"#AD7B98\", width=14)\n\n luong(loaddl)\n loadding(1)\n win.mainloop()\n\nif __name__ == '__main__':\n main()","repo_name":"HUYTIEUQUY/face_reconition","sub_path":"taikhoan.py","file_name":"taikhoan.py","file_ext":"py","file_size_in_byte":8089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14831755652","text":"from flask import Flask, render_template, request, redirect\n\nfrom app.point_table import PointTable\n\napp = Flask(__name__)\n\npoint_table = PointTable()\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\", games=point_table.get_games())\n\n\n@app.route('/add_game', methods=['POST'])\ndef add_game():\n try:\n score = request.form.get('score')\n score = int(score) if str(score).isdigit() else None\n point_table.add_game(score)\n return redirect('/')\n except Exception as err:\n return render_template(\"index.html\", games=point_table.get_games(), error=str(err))\n\n\napp.run()\n","repo_name":"yurihartmann/point_table","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41601086121","text":"\nimport os\nimport urllib.request as ur\n\nclass Scrape_Saver:\n\n def _init_(self, base_url, save_location):\n self.base_url = base_url\n self.save_location = save_location\n return\n\n def retrieve(self, item):\n if item in os.listdir(self.save_location):\n with open(self.save_location + item,\"r\") as infile:\n return infile.read()\n else:\n string = ur.urlopen(self.base_url.format(item)).read().decode()\n with open(self.save_location + item, 'w') as outfile:\n outfile.write(string)\n return string\n\n def _str_(self): \n return str(os.listdir(self.save_location))\n\n\nif __name__ == '_main_':\n url = \"http://www.uniprot.org/uniprot/{}.fasta\"\n item = 'P69892'\n x = Scrape_Saver(url, 'saves/')\n print(x.retrieve(item))\n print (x)","repo_name":"janitanay2707/CS696","sub_path":"Exercises/excercise_06.py","file_name":"excercise_06.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29232544131","text":"from bash_bot.bot import BashBot\nfrom utils.files import load_config_json, load_scripts_json\n\n\ndef main():\n config = load_config_json()\n scripts = load_scripts_json()\n bot = BashBot(config, scripts)\n bot.start()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SudoOmbro/BashBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"38483193375","text":"import os\nfrom shlex import quote\nfrom fast_align.utils import run_bash_command\n\n\ndef read_err(err):\n T, m = \"\", \"\"\n for line in open(err):\n if \"expected target length\" in line:\n m = line.split()[-1]\n elif \"final tension\" in line:\n T = line.split()[-1]\n return T, m\n\n\ndef align_corpus(\n fast_align_dir: str,\n corpus_path: str,\n 
output_dir: str,\n    alignment_direction: str,\n) -> None:\n\n    print(f\"Running Fast Align...\")\n    assert alignment_direction in [\n        \"forward\", \"reverse\", \"combine\"\n    ], f\"alignment_direction {alignment_direction} not supported. Supported directions: [forward, reverse, combine]\"\n\n    fast_align_executable: str = os.path.join(fast_align_dir, \"fast_align\")\n    atools_executable: str = os.path.join(fast_align_dir, \"atools\")\n\n    if alignment_direction == \"forward\" or alignment_direction == \"combine\":\n        print(f\" Running Forward Direction...\")\n        forward_name: str = \"forward.talp\"\n        forward_command: str = (\n            f\"{quote(fast_align_executable)} \"\n            f\"-i {quote(corpus_path)} \"\n            f\"-d -o -v \"\n            f\"> {quote(os.path.join(output_dir,forward_name))}\"\n        )\n        run_bash_command(forward_command)\n\n    if alignment_direction == \"reverse\" or alignment_direction == \"combine\":\n        print(f\" Running Reverse Direction...\")\n        reverse_name: str = \"reverse.talp\"\n        reverse_command: str = (\n            f\"{quote(fast_align_executable)} \"\n            f\"-i {quote(corpus_path)} \"\n            f\"-r -d -o -v \"\n            f\"> {quote(os.path.join(output_dir,reverse_name))}\"\n        )\n        run_bash_command(reverse_command)\n\n    if alignment_direction == \"combine\":\n        print(f\" Combining directions with the grow-diag-final-and method...\")\n        forward_name: str = \"forward.talp\"\n        reverse_name: str = \"reverse.talp\"\n        combine_name: str = \"grow_diag_final-and.talp\"\n        combine_command: str = (\n            f\"{quote(atools_executable)} \"\n            f\"-i {quote(os.path.join(output_dir,forward_name))} \"\n            f\"-j {quote(os.path.join(output_dir,reverse_name))} \"\n            f\"-c grow-diag-final-and \"\n            f\"> {quote(os.path.join(output_dir,combine_name))}\"\n        )\n\n        run_bash_command(combine_command)\n\n    print(f\"Done!\")\n\n\ndef train_fast_align(\n    fast_align_dir: str,\n    corpus_path: str,\n    output_dir: str,\n) -> None:\n    fast_align_executable = os.path.join(fast_align_dir, \"fast_align\")\n\n    forward_params_name: str = \"fwd_params\"\n    forward_align_name: str = \"forward.talp\"\n    forward_error_name: str = \"fwd_err\"\n    forward_command: str = (\n        f\"{quote(fast_align_executable)} \"\n        f\"-i {quote(corpus_path)} \"\n        f\"-d -o -v -p \"\n        f\" {quote(os.path.join(output_dir, forward_params_name))} \"\n        f\"> {quote(os.path.join(output_dir, forward_align_name))} \"\n        f\"2> {quote(os.path.join(output_dir, forward_error_name))}\"\n    )\n\n    reverse_params_name: str = \"rev_params\"\n    reverse_align_name: str = \"reverse.talp\"\n    reverse_error_name: str = \"rev_err\"\n    reverse_command: str = (\n        f\"{quote(fast_align_executable)} \"\n        f\"-i {quote(corpus_path)} \"\n        f\"-r -d -o -v -p \"\n        f\" {quote(os.path.join(output_dir, reverse_params_name))} \"\n        f\"> {quote(os.path.join(output_dir, reverse_align_name))} \"\n        f\"2> {quote(os.path.join(output_dir, reverse_error_name))}\"\n    )\n\n    run_bash_command(forward_command)\n    run_bash_command(reverse_command)\n\n\ndef inference_fast_align(\n    fast_align_dir: str,\n    corpus_path: str,\n    model_dir: str,\n    output_path: str,\n    heuristic: str = \"grow-diag-final-and\",\n) -> None:\n    fast_align_executable: str = os.path.join(fast_align_dir, \"fast_align\")\n    atools_executable: str = os.path.join(fast_align_dir, \"atools\")\n    forward_params_name: str = os.path.join(model_dir, \"fwd_params\")\n    forward_error_name: str = os.path.join(model_dir, \"fwd_err\")\n    reverse_params_name: str = os.path.join(model_dir, \"rev_params\")\n    reverse_error_name: str = os.path.join(model_dir, \"rev_err\")\n\n    fwd_T, fwd_m = read_err(forward_error_name)\n    rev_T, rev_m = 
read_err(reverse_error_name)\n\n    forward_file_name = f\"{output_path}.forward\"\n    forward_command = (\n        f\"{fast_align_executable} \"\n        f\"-i {corpus_path} \"\n        f\"-d \"\n        f\"-T {fwd_T} \"\n        f\"-m {fwd_m} \"\n        f\"-f {forward_params_name} \"\n        f\"> {forward_file_name} \"\n    )\n    run_bash_command(forward_command)\n\n    get_column_command = (\n        f\"cat {forward_file_name} | \"\n        f\"awk -F \\\"\\\\\\\\\\\\\\\\|\\\\\\\\\\\\\\\\|\\\\\\\\\\\\\\\\|\\\" '{{print $3}}' | \"\n        f\"awk '{{$1=$1}};1' \"\n        f\"> {forward_file_name}.tmp\"\n    )\n    run_bash_command(get_column_command)\n    move_command = f\"mv {forward_file_name}.tmp {forward_file_name}\"\n    run_bash_command(move_command)\n\n    reverse_file_name = f\"{output_path}.reverse\"\n    reverse_command = (\n        f\"{fast_align_executable} \"\n        f\"-i {corpus_path} \"\n        f\"-r \"\n        f\"-d \"\n        f\"-T {rev_T} \"\n        f\"-m {rev_m} \"\n        f\"-f {reverse_params_name} \"\n        f\"> {reverse_file_name} \"\n    )\n    run_bash_command(reverse_command)\n\n    get_column_command = (\n        f\"cat {reverse_file_name} | \"\n        f\"awk -F \\\"\\\\\\\\\\\\\\\\|\\\\\\\\\\\\\\\\|\\\\\\\\\\\\\\\\|\\\" '{{print $3}}' | \"\n        f\"awk '{{$1=$1}};1' \"\n        f\"> {reverse_file_name}.tmp \"\n    )\n    run_bash_command(get_column_command)\n    move_command = f\"mv {reverse_file_name}.tmp {reverse_file_name}\"\n    run_bash_command(move_command)\n\n    combine_command: str = (\n        f\"{quote(atools_executable)} \"\n        f\"-i {forward_file_name} \"\n        f\"-j {reverse_file_name} \"\n        f\"-c {heuristic} \"\n        f\"> {output_path}\"\n    )\n    run_bash_command(combine_command)\n","repo_name":"ikergarcia1996/Easy-Label-Projection","sub_path":"fast_align/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":5799,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"39436190439","text":"import ast #to process trees.\r\nfile=open(\"cities\",\"r\") #open data file\r\ndata=file.read()\r\ngraph=ast.literal_eval(data)\r\nprint(graph) #print graph\r\nfile.close() #to prevent file errors.\r\n\r\nvisited = [] #visited nodes list.\r\nqueue = [] #Initialize an empty queue.\r\n\r\ndef bfs(visited, graph, node): #BFS function\r\n    visited.append(node)\r\n    queue.append(node)\r\n\r\n    while queue: # visiting each node by a loop\r\n        m = queue.pop(0)\r\n        print (m, end = \" \")\r\n\r\n        for neighbour in graph[m]:\r\n            if neighbour not in visited:\r\n                visited.append(neighbour)\r\n                queue.append(neighbour)\r\n\r\nprint(\"The output of the Breadth-First Search algorithm \")\r\nbfs(visited, graph, '5') #call the algorithm function","repo_name":"Kyrillos-George/Ai-Projects","sub_path":"BFS_Algorithm.py","file_name":"BFS_Algorithm.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7142065249","text":"# This file contains your custom actions which can be used to run\n# custom Python code.\n#\n# See this guide on how to implement these actions:\n# https://rasa.com/docs/rasa/custom-actions\n\n\n# This is a simple example for a custom action which utters \"Hello World!\"\n\n# from typing import Any, Text, Dict, List\n#\n# from rasa_sdk import Action, Tracker\n# from rasa_sdk.executor import CollectingDispatcher\n#\n#\n# class ActionHelloWorld(Action):\n#\n#     def name(self) -> Text:\n#         return \"action_hello_world\"\n#\n#     def run(self, dispatcher: CollectingDispatcher,\n#             tracker: Tracker,\n#             domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n#\n#         dispatcher.utter_message(text=\"Hello World!\")\n#\n#         return 
[]\n\nfrom typing import Any, Text, Dict, List\n\nimport arrow\nimport random\nimport dateparser\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.events import SlotSet\nfrom rasa_sdk.executor import CollectingDispatcher\n\ncity_db = {\n    'cancun': 'America/Cancun',\n    'merida': 'America/Merida',\n    'matamoros': 'America/Matamoros',\n    'monterrey': 'America/Monterrey',\n    'mexico': 'America/Mexico_City',\n    'ojinaga': 'America/Ojinaga',\n    'chihuahua': 'America/Chihuahua',\n    'hermosillo': 'America/Hermosillo',\n    'mazatlan': 'America/Mazatlan',\n    'bahia banderas': 'America/Bahia_Banderas',\n    'tijuana': 'America/Tijuana',\n    'dakota': 'America/North_Dakota/Center',\n    'beulah': 'America/North_Dakota/Beulah',\n    'indianapolis': 'America/Indiana/Indianapolis',\n    'marengo':'America/Indiana/Marengo',\n    'vincennes': 'America/Indiana/Vincennes',\n    'petersburg': 'America/Indiana/Petersburg',\n    'vevay': 'America/Indiana/Vevay',\n    'louisville': 'America/Kentucky/Louisville',\n    'monticello': 'America/Kentucky/Monticello',\n    'pangnirtung': 'America/Pangnirtung'\n}\n\njokes = [\n    \"I failed math so many times at school, I can’t even count.\",\n    \"I used to have a handle on life, but then it broke.\",\n    \"I was wondering why the frisbee kept getting bigger and bigger, but then it hit me.\",\n    \"Don’t you hate it when someone answers their own questions? I do.\"\n]\n\nclass ActionTellTime(Action):\n\n    def name(self) -> Text:\n        return \"action_tell_time\"\n\n    def run(self, dispatcher: CollectingDispatcher,\n            tracker: Tracker,\n            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n        current_place = next(tracker.get_latest_entity_values(\"place\"), None)\n        utc = arrow.utcnow()\n\n        if not current_place:\n            msg = f\"It's {utc.to(city_db['mexico']).format('HH:mm')} now, but it may change in a second.\"\n            dispatcher.utter_message(text=msg)\n            return []\n\n        current_place = current_place.lower()\n        tz_string = city_db.get(current_place, None)\n        if not tz_string:\n            msg = f\"My database doesn't contain {current_place}. Sorry for the inconvenience. :)\"\n            dispatcher.utter_message(text=msg)\n            return []\n\n        msg = f\"It's {utc.to(city_db[current_place]).format('HH:mm')} in {current_place} now. It may have changed by a second when you read this message!\"\n        dispatcher.utter_message(text=msg)\n\n        return []\n\nclass ActionTellName(Action):\n\n    def name(self) -> Text:\n        return \"action_tell_name\"\n\n    def run(self, dispatcher: CollectingDispatcher,\n            tracker: Tracker,\n            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n        name = next(tracker.get_latest_entity_values(\"name\"), None)\n        utc = arrow.utcnow()\n\n        if not name:\n            msg = f\"Sorry, I didn't get your name, please try again :(\"\n            dispatcher.utter_message(text=msg)\n            return []\n        msg = f\"Hello {name}! 
I think its better to call you human instead :)\"\n dispatcher.utter_message(text=msg)\n \n return []\n\nclass ActionTellJoke(Action):\n\n def name(self) -> Text:\n return \"action_tell_joke\"\n\n def run(self, dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n msg = random.choice(jokes)\n dispatcher.utter_message(text=msg)\n return []\n","repo_name":"javiermomc/Chatbot","sub_path":"actions/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13676015899","text":"from player import HumanPlayer, CpuPlayer, SmartCpuPlayer\nfrom boardCheck import fullBoardCheck, xWinCheck, oWinCheck\nfrom evaluate import evaluateBoard\nimport os\nimport time\n\nclass TicTacToe:\n \n def __init__(self, x, o):\n self.board = [[0,0,0],[0,0,0],[0,0,0]] \n self.gameOver = False # initializes board as 0, 1, or -1\n \n if x == 1:\n self.x = HumanPlayer(self.board)\n elif x == -1:\n while True:\n try:\n depth = int(input(\"Depth of X cpu (can't be greater than 6) >> \"))\n if depth > 6:\n depth = 6\n break\n except:\n print(\"\\nInvalid depth\\n\")\n continue\n \n self.x = SmartCpuPlayer(self.board, True, depth)\n \n if o == 1:\n self.o = HumanPlayer(self.board)\n elif o == -1:\n while True:\n try:\n depth = int(input(\"Depth of O cpu (can't be greater than 6) >> \"))\n if depth > 6:\n depth = 6\n break\n except:\n print(\"\\nInvalid depth\\n\")\n continue\n self.o = SmartCpuPlayer(self.board, False, depth)\n\n def draw_board(self):\n\n # draws board using X's and O's in place of the number values\n os.system('cls' if os.name == 'nt' else 'clear') \n \n for c in self.board:\n for s in c: \n if s == 0:\n print('-',end=' ')\n elif s == 1:\n print('X',end=' ')\n elif s == -1:\n print('O',end=' ')\n else:\n print('-',end=' ')\n print('\\n')\n\n \n def play(self):\n\n while not self.gameOver:\n \n self.draw_board()\n # print(evaluateBoard(self.board))\n\n xMove = self.x.get_move()\n\n self.board[xMove[0]][xMove[1]] = 1 # adds move to board\n\n # checks if X wins the game after they move\n if xWinCheck(self.board):\n print(evaluateBoard(self.board))\n self.draw_board()\n print('X wins')\n self.gameOver = True\n break\n # checks if the last move was a cat's game AFTER it checks if X wins\n elif fullBoardCheck(self.board):\n print(evaluateBoard(self.board))\n self.draw_board()\n print(\"\\nCat's game\")\n self.gameOver = True\n break\n\n\n self.draw_board()\n # print(evaluateBoard(self.board))\n oMove = self.o.get_move()\n\n self.board[oMove[0]][oMove[1]] = -1 # adds move to the board\n\n if oWinCheck(self.board):\n print(evaluateBoard(self.board))\n self.draw_board()\n print('O wins')\n self.gameOver = True\n break\n\n\nif __name__ == '__main__':\n \n while True:\n os.system('cls' if os.name == 'nt' else 'clear')\n \n try:\n xPlayer = input(\"X player (1 for human, -1 for cpu) >> \")\n oPlayer = input(\"O player (1 for human, -1 for cpu) >> \")\n \n game = TicTacToe(int(xPlayer), int(oPlayer))\n game.play();\n \n except ValueError:\n os.system('cls' if os.name == 'nt' else 'clear')\n continue\n\n playAgain = input(\"\\n\\nPlay again? 
(y/n) >> \")\n\n if playAgain.lower() != 'y':\n os.system('cls' if os.name == 'nt' else 'clear')\n print(\"See you later!\")\n break\n","repo_name":"NoahEspi/MinimaxTicTacToe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20921436748","text":"# -*- coding: utf-8 -*-\n\nimport ROOT\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nrcParams.update({\"figure.autolayout\": True})\nplt.style.use(\"ggplot\")\n\nimport numpy as np\nimport itertools\nimport os\nimport tarfile\n\nimport luigi\nimport law\n\nfrom collections import OrderedDict\nfrom analysis.tasks.base import AnalysisTask\nfrom analysis.root import ROOTPlot\nfrom law.target.local import LocalDirectoryTarget\n\ndirname = os.path.abspath(os.path.dirname(__file__))\n\nclass PlotFromCSV(AnalysisTask):\n shift = luigi.Parameter(default=\"ALL\")\n flavor = luigi.ChoiceParameter(choices=[\"lf\", \"c\", \"hf\"])\n csv_file = os.path.join(dirname, \"sf_2018_prod11.csv\") # new\n compare_file = os.path.join(dirname, \"sf_2018_prod7.csv\") # old\n norm_to_nominal = luigi.BoolParameter()\n\n root_hf_file = os.path.join(dirname, \"scale_factors_deepjet_hf_binned.root\")\n root_lf_file = os.path.join(dirname, \"scale_factors_deepjet_lf_binned.root\")\n\n def output(self):\n return self.local_target(\"plots_{}.tgz\".format(self.shift))\n\n def run(self):\n ROOT.PyConfig.IgnoreCommandLineOptions = True\n ROOT.gROOT.SetBatch()\n\n ROOT.gSystem.Load('libCondFormatsBTauObjects')\n ROOT.gSystem.Load('libCondToolsBTau')\n\n local_tmp = LocalDirectoryTarget(is_tmp=True)\n local_tmp.touch()\n\n jes_sources = self.config_inst.get_aux(\"jes_sources\")\n shifts = []\n if self.shift == \"ALL\":\n if self.flavor == \"c\":\n shifts.extend([\"cferr1\", \"cferr2\"])\n else:\n shifts.extend([\"jes{}\".format(jes_source) for jes_source in jes_sources if jes_source != \"Total\"])\n shifts.extend([\"{}{}\".format(region, type) for region, type in\n itertools.product([\"lf\", \"hf\"], [\"\", \"stats1\", \"stats2\"])])\n elif self.shift == \"NONE\":\n shifts = []\n else:\n shifts = [self.shift]\n\n v_sys = getattr(ROOT, 'vector')()\n for shift in shifts:\n v_sys.push_back(\"up_\" + shift)\n v_sys.push_back(\"down_\" + shift)\n\n flavor_ids = self.config_inst.get_aux(\"flavor_ids\")\n binning = self.config_inst.get_aux(\"binning\")[self.flavor]\n\n pt_binning = [(start, end) for start, end in zip(binning[\"pt\"][:-1], binning[\"pt\"][1:])]\n eta_binning = [(start, end) for start, end in zip(binning[\"abs(eta)\"][:-1], binning[\"abs(eta)\"][1:])]\n\n figures = {}\n if self.compare_file is None:\n csv_files, descriptions = [self.csv_file], [\"csv\"]\n else:\n csv_files, descriptions = [self.csv_file, self.compare_file], [\"new\", \"old\"]\n for input_file, id in zip(csv_files, descriptions):\n # create calibration reader\n calib = ROOT.BTagCalibration(\"csv_{}\".format(id), input_file)\n reader = ROOT.BTagCalibrationReader(\n 3, # 0 is for loose op, 1: medium, 2: tight, 3: discr. reshaping\n \"central\", # central systematic type\n v_sys, # vector of other sys. 
types\n )\n for jetFlavor in [0, 1, 2]:\n reader.load(\n calib,\n jetFlavor, # 0 is for b flavour, 1: FLAV_C, 2: FLAV_UDSG\n \"iterativefit\" # measurement type\n )\n\n for pt_idx, pt_range in enumerate(pt_binning):\n for eta_idx, eta_range in enumerate(eta_binning):\n key = pt_range + eta_range\n if key not in figures:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(\"pt: %s to %s, eta: %.1f to %.1f\" % (pt_range + eta_range))\n else:\n fig, ax = figures[key]\n\n if pt_range[1] == np.inf:\n pt_val = pt_range[0] + 1\n else:\n pt_val = np.mean(pt_range)\n eta_val = np.mean(eta_range)\n\n def get_values(csv_reader, sys_type):\n x_values = np.linspace(-0.1, 1., 10000)\n y_values = []\n for csv_value in x_values:\n sf = csv_reader.eval_auto_bounds(\n sys_type, # systematic (here also 'up'/'down' possible)\n flavor_ids[self.flavor], # jet flavor\n eta_val, # absolute value of eta\n pt_val, # pt\n csv_value\n )\n y_values.append(sf)\n return np.array(x_values), np.array(y_values)\n\n x_values, nominal_values = get_values(reader, \"central\")\n if not self.norm_to_nominal:\n ax.plot(x_values, nominal_values, label=\"{}, {}\".format(id, \"nominal\"))\n\n if self.shift != \"NONE\":\n total_errors_up = np.zeros(nominal_values.shape)\n total_errors_down = np.zeros(nominal_values.shape)\n for shift in shifts:\n _, up_values = get_values(reader, \"up_\" + shift)\n _, down_values = get_values(reader, \"down_\" + shift)\n\n if len(shifts) > 1: # build envelope\n diff_up = up_values - nominal_values\n diff_down = down_values - nominal_values\n\n # shift with effect in up/down direction\n errors_up = np.max([diff_up, diff_down, np.zeros(nominal_values.shape)], axis=0)\n errors_down = np.min([diff_up, diff_down, np.zeros(nominal_values.shape)], axis=0)\n\n # add in quadrature\n total_errors_up += errors_up**2\n total_errors_down += errors_down**2\n total_errors_up = total_errors_up**0.5\n total_errors_down = total_errors_down**0.5\n\n if len(shifts) > 1:\n up_values = nominal_values + total_errors_up\n down_values = nominal_values - total_errors_down\n if self.norm_to_nominal:\n up_values /= nominal_values\n down_values /= nominal_values\n ax.plot(x_values, up_values, label=\"{}, {}\".format(id, \"up_\" + self.shift))\n ax.plot(x_values, down_values, label=\"{}, {}\".format(id, \"down\" + self.shift))\n\n if self.compare_file is None:\n if self.flavor in [\"c\", \"hf\"]:\n input_file = self.root_hf_file\n elif self.flavor == \"lf\":\n input_file = self.root_lf_file\n else:\n raise Exception(\"No .root file for c flavor SFs.\")\n\n root_file = ROOT.TFile.Open(input_file)\n\n func_name = \"csv_ratio_Pt{}_Eta{}_final\".format(pt_idx, eta_idx)\n if self.flavor == \"c\":\n func_name = \"c_\" + func_name\n\n func = root_file.Get(func_name)\n\n y_values = []\n x_values = np.linspace(-0.1, 1., 10000)\n for csv_value in x_values:\n y_val = func.Eval(csv_value)\n y_values.append(y_val)\n\n ax.plot(x_values, y_values, label=\"{}, {}\".format(\".root\", \"nominal\"))\n\n figures[key] = (fig, ax)\n\n del reader\n del calib\n\n for key, (fig, ax) in figures.items():\n ax.legend(loc=\"lower right\")\n ax.set_ylim(0., 2.)\n fig.savefig(os.path.join(local_tmp.path, \"SF_%s_%s_Pt%sTo%s_eta%.1fTo%.1f.pdf\" % ((self.flavor, self.shift) + key)))\n\n with self.output().localize(\"w\") as tmp:\n with tarfile.open(tmp.path, \"w:gz\") as tar:\n for plot_file in os.listdir(local_tmp.path):\n tar.add(os.path.join(local_tmp.path, plot_file), arcname=plot_file)\n\n\nclass PlotShiftsFromCSV(AnalysisTask, 
law.WrapperTask):\n shifts = law.CSVParameter(default=[], description=\"shifts to require\")\n skip_shifts = law.CSVParameter(default=[], description=\"shifts to skip, supports patterns\")\n\n flavor = PlotFromCSV.flavor\n norm_to_nominal = PlotFromCSV.norm_to_nominal\n\n wrapped_task = PlotFromCSV\n\n def __init__(self, *args, **kwargs):\n super(PlotShiftsFromCSV, self).__init__(*args, **kwargs)\n\n jes_sources = self.config_inst.get_aux(\"jes_sources\")\n\n if not self.shifts:\n self.shifts = []\n if self.flavor == \"c\":\n self.shifts = [\"cferr1\", \"cferr2\"]\n else:\n self.shifts.extend([\"jes{}\".format(jes_source) for jes_source in jes_sources if jes_source != \"Total\"])\n self.shifts.extend([\"{}{}\".format(region, type) for region, type in\n itertools.product([\"lf\", \"hf\"], [\"\", \"stats1\", \"stats2\"])])\n if self.skip_shifts:\n filter_fn = lambda d: not law.util.multi_match(d, self.skip_shifts)\n self.shifts = filter(filter_fn, self.shifts)\n\n def requires(self):\n def req(shift):\n return self.wrapped_task.req(self, shift=shift)\n\n return OrderedDict([(shift, req(shift)) for shift in self.shifts])\n\n\nclass PlotFromRoot(AnalysisTask):\n\n hf_file = \"/user/rath/Deepcsv_rwt_fit_hf_v2_final_2018_2_12test.root\"\n lf_file = \"/user/rath/Deepcsv_rwt_fit_lf_v2_final_2018_2_12test.root\"\n\n flavor = luigi.ChoiceParameter(choices=[\"hf\", \"lf\"])\n norm_to_nominal = luigi.BoolParameter()\n shift = luigi.Parameter(default=\"ALL\")\n\n def output(self):\n return self.local_target(\"plots.tgz\")\n\n def run(self):\n ROOT.PyConfig.IgnoreCommandLineOptions = True\n ROOT.gROOT.SetBatch()\n\n local_tmp = LocalDirectoryTarget(is_tmp=True)\n local_tmp.touch()\n\n if self.flavor == \"hf\":\n input_file = self.hf_file\n else:\n input_file = self.lf_file\n\n root_file = ROOT.TFile.Open(input_file)\n\n hist_tpl = \"h_csv_ratio_Pt{}_Eta{}_final\"\n\n binning = self.config_inst.get_aux(\"binning\")[self.flavor]\n n_pt_categories = len(binning[\"pt\"]) - 1\n n_eta_categories = len(binning[\"abs(eta)\"]) - 1\n hist_names = [hist_tpl.format(pt_idx, eta_idx)\n for pt_idx in range(n_pt_categories) for eta_idx in range(n_eta_categories)]\n\n ##\n ## Plot histograms with total systematic envelope\n ##\n\n for hist_name in hist_names:\n nominal_hist = root_file.Get(hist_name)\n\n errors_up = []\n errors_down = []\n # collect all shifts from file\n if self.shift == \"ALL\":\n shifts = [key.GetName().split(\"_\")[-1] for key in root_file.GetListOfKeys()\n if key.GetName().startswith(hist_name)]\n shifts = set([shift[:-2] for shift in shifts if shift[-2:] == \"Up\"])\n else:\n shifts = [self.shift]\n for shift_idx, shift in enumerate(shifts):\n hist_name_up = hist_name + \"_\" + shift + \"Up\"\n hist_name_down = hist_name + \"_\" + shift + \"Down\"\n hist_up = root_file.Get(hist_name_up)\n hist_down = root_file.Get(hist_name_down)\n\n for bin_idx in range(1, nominal_hist.GetNbinsX() + 1):\n nominal_value = nominal_hist.GetBinContent(bin_idx)\n\n # combine all shifts that have an effect in the same direction\n # effect from _up/done systematics\n diff_up = hist_up.GetBinContent(bin_idx) - nominal_value\n diff_down = hist_down.GetBinContent(bin_idx) - nominal_value\n\n\n # detect systematics where up/down shift direction is the same\n #if diff_up * diff_down > 0:\n # print \"One sided shift: {}, {}\".format(shift, category)\n\n # if multiple shifts, build envelope\n if len(shifts) != 1:\n # shift with effect in up/down direction\n error_up = max([diff_up, diff_down, 0])\n error_down = min([diff_up, 
diff_down, 0])\n\n # add in quadrature\n if shift_idx == 0:\n errors_up.append(error_up**2)\n errors_down.append(error_down**2)\n else:\n errors_up[bin_idx - 1] += error_up**2\n errors_down[bin_idx - 1] += error_down**2\n else:\n errors_up.append(diff_up)\n errors_down.append(-diff_down) # is subtracted later\n # multiple shifts have been added quadratically, take square root\n if len(shifts) != 1:\n errors_up = np.sqrt(errors_up)\n errors_down = np.sqrt(errors_down)\n\n # build shifted histograms\n combined_hist_up = nominal_hist.Clone()\n combined_hist_down = nominal_hist.Clone()\n\n for bin_idx in range(1, nominal_hist.GetNbinsX() + 1):\n combined_hist_up.SetBinContent(bin_idx, combined_hist_up.GetBinContent(bin_idx)\n + errors_up[bin_idx - 1])\n combined_hist_down.SetBinContent(bin_idx, combined_hist_down.GetBinContent(bin_idx)\n - errors_down[bin_idx - 1])\n\n if self.norm_to_nominal:\n combined_hist_up.Divide(nominal_hist)\n combined_hist_down.Divide(nominal_hist)\n\n plot = ROOTPlot(hist_name, hist_name)\n plot.create_pads()\n plot.cd(0, 0)\n plot.draw({\"nominal\": nominal_hist}, line_color=1)\n plot.draw({\"up\": combined_hist_up}, line_color=2)\n plot.draw({\"down\": combined_hist_down}, line_color=4)\n\n plot.save(os.path.join(local_tmp.path, \"{}.pdf\".format(hist_name)))\n\n ##\n ## Check scale factors, uncertainties, and fits\n ##\n\n for i, (pt_idx, eta_idx) in enumerate(itertools.product(range(n_pt_categories),\n range(n_eta_categories))):\n data_hist = root_file.Get(\"h_csv_Data_Pt{}_Eta{}\".format(pt_idx, eta_idx))\n if self.flavor == \"hf\":\n signal_base = \"h_csv_MC_bjets\"\n bg_base = \"h_csv_MC_nonbjets\"\n else:\n signal_base = \"h_csv_MC_nonbjets\" # actually lf\n bg_base = \"h_csv_MC_bjets\" # actually b + c\n\n signal_hist = root_file.Get(\"{}_Pt{}_Eta{}\".format(signal_base, pt_idx, eta_idx))\n bg_hist = root_file.Get(\"{}_Pt{}_Eta{}\".format(bg_base, pt_idx, eta_idx))\n\n #\n\n with self.output().localize(\"w\") as tmp:\n with tarfile.open(tmp.path, \"w:gz\") as tar:\n for plot_file in os.listdir(local_tmp.path):\n tar.add(os.path.join(local_tmp.path, plot_file), arcname=plot_file)\n","repo_name":"cms-btv-pog/jet-tagging-sf","sub_path":"analysis/scripts/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":15623,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"13205424684","text":"from numpy import vdot, zeros, where\nfrom numpy.linalg import norm\nfrom random import randint, choices\nfrom image import tronquer\n\n\ndef prochain_rayon(j, N_R, aleatoire, P, les_rayons):\n \"\"\"\n aleatoire : 0 pour schéma successif\n 1 pour aléatoire équiprobable\n 2 pour aléatoire optimisé :\n on prend le rayon j avec une probabilité\n de norme2(A[j])**2 / norme_euc(A) ** 2\n ces probabilités sont contenues dans la liste P\n \"\"\"\n # j est le dernier rayon utilisé\n if aleatoire == 0:\n return (j + 1) % N_R\n elif aleatoire == 1:\n return randint(0, N_R - 1)\n return choices(les_rayons, weights = P, k = 1)[0]\n\n\ndef ART(f0, A, R, N_ITER, aleatoire, cst):\n N_R = cst.N_THETA * cst.N_RHO # nombre de rayons\n N_P = cst.L * cst.H # nombre de pixels\n # Les vecteurs normaux aux hyperplans sont les (lignes) A[j]\n # Il est pratique de les rendre unitaires\n # On calcule la norme 2 de chaque ligne\n N = zeros((N_R, N_P))\n normes = norm(A, ord = 2, axis = 1)\n for j in range(N_R):\n N[j] = A[j] / normes[j]\n \n # Pour le choix aléatoire optimisé des rayons\n norme_A = norm(A)\n P = [(normes[j] / 
norme_A) ** 2 for j in range(N_R)]\n les_rayons = list(range(0, N_R))\n \n # On calcule aussi les projections orthogonales de 0 sur les hyperplans\n T = zeros((N_R, N_P))\n for j in range(N_R):\n T[j] = R[j] / normes[j] * N[j]\n\n # Initialisation\n f = f0\n j = 0\n for _ in range(N_ITER):\n f = f - vdot(f, N[j]) * N[j] + T[j]\n j = prochain_rayon(j, N_R, aleatoire, P, les_rayons)\n tronquer(f)\n return f\n","repo_name":"remigerme/algorithme-ART-python","sub_path":"src/resolution.py","file_name":"resolution.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75136202505","text":"class Solution:\n def stoneGameIX(self, stones: List[int]) -> bool:\n #helper function checks for 2 cases if alice picks stone divisible by 3 or not.\n #check iterartively for each turn if there is a stone which when added to the sum is not divisible by 3, if not then whose turn it is will lose \n #use counter for that\n n=len(stones)\n def helper(x):\n count=collections.Counter(i%3 for i in stones)\n if count[x]==0:\n return False\n curr=x\n count[x]-=1\n \n for i in range(n-1):\n found=False\n for j in range(3):\n if (j+curr)%3!=0 and count[j]>0:\n found=True\n curr=(j+curr)%3\n count[j]-=1\n break\n if not found:\n if i%2:\n return False\n return True\n return False\n return helper(1) or helper(2)\n \n ","repo_name":"bamblebam/competitive-programming","sub_path":"2021/10-October-21/15-10-21/stonegame9.py","file_name":"stonegame9.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1123617781","text":"from flet import *\nimport sqlite3\nimport flet as ft\nfrom io import BytesIO\nfrom PIL import Image\n\n\nbg='#cdb4db'\nbg2='#ffc8dd'\nbg3='#ffafcc'\nbg4='#bde0fe'\nbg5='#a2d2ff'\nclass Index1(UserControl):\n def __init__(self,page):\n super().__init__()\n self.page=page\n global conn\n conn = sqlite3.connect('Biblioteka.db', check_same_thread=False)\n\n # utworzenie tabeli uzytkownicy\n conn.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS uzytkownicy (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n imie TEXT NOT NULL,\n haslo TEXT NOT NULL,\n czy_admin INTEGER NOT NULL DEFAULT 0\n );\n \"\"\")\n\n # zapisanie zmian w bazie danych\n conn.commit()\n def build(self):\n ksiazki = ft.GridView(\n height=400,\n width=400,\n runs_count=5,\n max_extent=150,\n child_aspect_ratio=1.0,\n spacing=5,\n run_spacing=5,\n )\n cursor = conn.execute(\"SELECT id_ksiazki, tytul, autor, zdjecie FROM ksiazki\")\n books = cursor.fetchall()\n\n for book in books:\n book_id, title, author, image_data = book\n image = Image.open(BytesIO(image_data))\n print(image_data)\n image = Image.open(BytesIO(image_data))\n image.save('test_image.png')\n book_panel = Container(\n content=Column(\n [\n ft.Image(image,fit=ft.ImageFit.CONTAIN,),\n Text(title),\n Text(author),\n Row(\n [\n ElevatedButton(\"Kup\", on_click=lambda: self.buy_book(book_id),\n bgcolor=bg3,\n color=bg5,\n style=ft.ButtonStyle(\n shape={ft.MaterialState.HOVERED: ft.RoundedRectangleBorder(radius=20),\n ft.MaterialState.DEFAULT: ft.RoundedRectangleBorder(radius=2),},\n side={ft.MaterialState.DEFAULT: ft.BorderSide(1, bg4),\n ft.MaterialState.HOVERED: ft.BorderSide(2, bg4),},\n ),\n ),\n ElevatedButton(\"Wypożycz\", on_click=lambda: self.rent_book(book_id),\n bgcolor=bg3,\n color=bg5,\n style=ft.ButtonStyle(\n shape={ft.MaterialState.HOVERED: ft.RoundedRectangleBorder(radius=20),\n 
ft.MaterialState.DEFAULT: ft.RoundedRectangleBorder(radius=2),},\n side={ft.MaterialState.DEFAULT: ft.BorderSide(1, bg4),\n ft.MaterialState.HOVERED: ft.BorderSide(2, bg4),},\n ),\n ),\n ]\n ),\n ]\n )\n )\n ksiazki.controls.append(book_panel)\n\n\n\n glowna=Container(\n height=self.page.height,width=self.page.width,\n bgcolor=bg,\n content=Container(\n ksiazki,\n\n )\n )\n return glowna\n def buy_book(self, book_id):\n # Kod do obsługi zakupu książki\n pass\n\n def rent_book(self, book_id):\n # Kod do obsługi wypożyczenia książki\n pass","repo_name":"MateuszCh126/AplikacjaLekcje","sub_path":"AplikacjaBiblioteki/Pages/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35417787738","text":"# -*- coding: utf-8 -*-\n\"\"\"Test suite for axonius_api_client.\"\"\"\nimport os\nimport pathlib\n\nimport pytest\n\nfrom axonius_api_client import PACKAGE_ROOT\nfrom axonius_api_client.setup_env import (\n KEY_CERTWARN,\n KEY_DEFAULT_PATH,\n KEY_ENV_FILE,\n KEY_ENV_PATH,\n KEY_FEATURES,\n KEY_KEY,\n KEY_OVERRIDE,\n KEY_SECRET,\n KEY_URL,\n NO,\n YES,\n find_dotenv,\n get_env_ax,\n get_env_bool,\n get_env_connect,\n get_env_csv,\n get_env_features,\n get_env_path,\n get_env_str,\n)\n\n\nclass TestGetEnvCsv:\n def test_set(self, monkeypatch):\n monkeypatch.setenv(\"AX_TEST\", \"abc,def,ghi\")\n ret = get_env_csv(\"AX_TEST\")\n assert ret == [\"abc\", \"def\", \"ghi\"]\n\n\nclass TestGetEnvFeatures:\n def test_set(self, monkeypatch):\n monkeypatch.setenv(KEY_FEATURES, \"abc,def,ghi\")\n ret = get_env_features()\n assert ret == [\"abc\", \"def\", \"ghi\"]\n\n\nclass TestFindDotEnv:\n def test_supplied(self, monkeypatch, tmp_path):\n path = tmp_path / \".env\"\n path.touch()\n src, ret = find_dotenv(ax_env=tmp_path)\n assert src == \"supplied\"\n assert ret == str(path)\n\n def test_env_path_key(self, monkeypatch, tmp_path):\n monkeypatch.setenv(KEY_ENV_PATH, str(tmp_path))\n path = tmp_path / \".env\"\n path.touch()\n src, ret = find_dotenv()\n assert src == \"env_path\"\n assert ret == str(path)\n\n def test_default_env_path_key(self, monkeypatch, tmp_path):\n monkeypatch.delenv(KEY_ENV_PATH, raising=False)\n monkeypatch.delenv(KEY_DEFAULT_PATH, raising=False)\n monkeypatch.setenv(KEY_DEFAULT_PATH, str(tmp_path))\n path = tmp_path / \".env\"\n path.touch()\n src, ret = find_dotenv()\n assert src == \"default_path\"\n assert ret == str(path)\n\n def test_find_dotenv_not_found(self, monkeypatch, tmp_path):\n monkeypatch.delenv(KEY_ENV_PATH, raising=False)\n monkeypatch.delenv(KEY_DEFAULT_PATH, raising=False)\n monkeypatch.setenv(KEY_ENV_FILE, \"moofile\")\n old_path = os.getcwd()\n os.chdir(tmp_path)\n src, ret = find_dotenv(default=None)\n os.chdir(old_path)\n assert src == \"not_found\"\n assert ret == \"\"\n\n def test_find_dotenv_cwd(self, monkeypatch, tmp_path):\n monkeypatch.delenv(KEY_ENV_PATH, raising=False)\n monkeypatch.delenv(KEY_DEFAULT_PATH, raising=False)\n path = tmp_path / \".env\"\n path.touch()\n old_path = os.getcwd()\n os.chdir(tmp_path)\n src, ret = find_dotenv(default=None)\n os.chdir(old_path)\n assert src == \"find_dotenv_cwd\"\n assert ret == str(path)\n\n def test_find_dotenv_pkg(self, monkeypatch, tmp_path):\n monkeypatch.delenv(KEY_ENV_PATH, raising=False)\n monkeypatch.delenv(KEY_DEFAULT_PATH, raising=False)\n monkeypatch.setenv(KEY_ENV_FILE, \"test.env\")\n path = pathlib.Path(PACKAGE_ROOT) / \"test.env\"\n path.touch()\n src, ret = 
find_dotenv(default=None)\n path.unlink()\n assert src == \"find_dotenv_pkg\"\n assert ret.endswith(\"test.env\")\n\n\nclass TestGetEnvStr:\n def test_default(self, monkeypatch):\n ret = get_env_str(key=\"boom\", default=\"abc\")\n assert ret == \"abc\"\n\n def test_lower(self, monkeypatch):\n monkeypatch.setenv(\"AX_TEST\", \" ABC \")\n ret = get_env_str(key=\"AX_TEST\", lower=True)\n assert ret == \"abc\"\n\n def test_invalid(self, monkeypatch):\n monkeypatch.delenv(\"AX_TEST\", raising=False)\n with pytest.raises(ValueError):\n get_env_str(key=\"AX_TEST\")\n\n\nclass TestGetEnvPath:\n def test_none(self, monkeypatch):\n ret = get_env_path(key=\"boom\")\n assert ret == \"\"\n\n def test_default(self, monkeypatch):\n ret = get_env_path(key=\"boom\", default=os.getcwd())\n assert ret == pathlib.Path(os.getcwd()).expanduser().resolve()\n\n def test_default_get_dir(self, monkeypatch, tmp_path):\n path = tmp_path / \"file.test\"\n path.touch()\n ret = get_env_path(key=\"boom\", default=str(path))\n assert ret == tmp_path\n\n def test_default_noget_dir(self, monkeypatch, tmp_path):\n path = tmp_path / \"file.test\"\n path.touch()\n ret = get_env_path(key=\"boom\", default=str(path), get_dir=False)\n assert ret == path\n\n\nclass TestGetEnvBool:\n @pytest.mark.parametrize(\"value\", YES, scope=\"class\")\n def test_yes(self, value, monkeypatch):\n monkeypatch.setenv(\"AX_TEST\", value)\n ret = get_env_bool(\"AX_TEST\")\n assert ret is True\n\n @pytest.mark.parametrize(\"value\", NO, scope=\"class\")\n def test_no(self, value, monkeypatch):\n monkeypatch.setenv(\"AX_TEST\", value)\n ret = get_env_bool(\"AX_TEST\")\n assert ret is False\n\n def test_err(self, monkeypatch):\n monkeypatch.setenv(\"AX_TEST\", \"x\")\n with pytest.raises(ValueError):\n get_env_bool(\"AX_TEST\")\n\n def test_default1(self, monkeypatch):\n monkeypatch.setenv(\"AX_TEST\", \"\")\n ret = get_env_bool(\"AX_TEST\", default=\"yes\")\n assert ret is True\n\n def test_default2(self, monkeypatch):\n monkeypatch.delenv(\"AX_TEST\", raising=False)\n ret = get_env_bool(\"AX_TEST\", default=\"yes\")\n assert ret is True\n\n\nclass TestGetEnvConnect:\n def test_no_override(self, monkeypatch):\n URL = \"a\"\n KEY = \"b\"\n SEC = \"c\"\n WARN = \"yes\"\n exp = {\"url\": URL, \"key\": KEY, \"secret\": SEC, \"certwarn\": True}\n monkeypatch.setenv(KEY_URL, URL)\n monkeypatch.setenv(KEY_KEY, KEY)\n monkeypatch.setenv(KEY_SECRET, SEC)\n monkeypatch.setenv(KEY_CERTWARN, WARN)\n monkeypatch.setenv(KEY_OVERRIDE, \"no\")\n ret = get_env_connect()\n assert ret == exp\n\n def test_override(self, monkeypatch):\n URL = \"a\"\n KEY = \"b\"\n SEC = \"c\"\n monkeypatch.setenv(KEY_URL, URL)\n monkeypatch.setenv(KEY_KEY, KEY)\n monkeypatch.setenv(KEY_SECRET, SEC)\n monkeypatch.setenv(KEY_OVERRIDE, \"yes\")\n ret = get_env_connect()\n assert ret[\"url\"]\n assert ret[\"key\"]\n assert ret[\"secret\"]\n\n\nclass TestGetEnvAx:\n def test_valid(self, monkeypatch):\n monkeypatch.setenv(\"AX_TEST\", \"boom\")\n ret = get_env_ax()\n assert ret[\"AX_TEST\"] == \"boom\"\n","repo_name":"ColdSmoke627/axonius_api_client","sub_path":"axonius_api_client/tests/tests_pkg/test_setup_env.py","file_name":"test_setup_env.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"38439795514","text":"from ruamel.yaml import ScalarNode, SequenceNode, MappingNode\nfrom ruamel.yaml.constructor import RoundTripConstructor\n\nfrom contextlib import contextmanager\nfrom .util import 
fix_keywords\n\n\n__all__ = ['CustomConstructor']\n\n\nclass CustomConstructor(RoundTripConstructor):\n def __init__(self, *args, **kwargs):\n super().__init__(self, *args, **kwargs)\n\n self.yaml_constructors = RoundTripConstructor.yaml_constructors.copy()\n self.yaml_multi_constructors = RoundTripConstructor.yaml_multi_constructors.copy()\n\n self.contexts = [{}]\n\n def add_constructor(self, tag, f):\n self.yaml_constructors[tag] = f\n\n def add_multi_constructor(self, tag, f):\n self.yaml_multi_constructors[tag] = f\n\n @property\n def context(self):\n return self.contexts[-1]\n\n @contextmanager\n def set_context(self, **kwargs):\n new = self.contexts[-1].copy()\n new.update(**kwargs)\n self.contexts.append(new)\n yield\n self.contexts.pop()\n\n def construct_object_ignore_tag(self, node):\n if isinstance(node, ScalarNode):\n return self.construct_scalar(node)\n elif isinstance(node, SequenceNode):\n return list(self.construct_yaml_seq(node))[0]\n elif isinstance(node, MappingNode):\n return list(self.construct_yaml_map(node))[0]\n\n def construct_raw(self, node):\n if isinstance(node, ScalarNode):\n return node\n elif isinstance(node, SequenceNode):\n return node.value\n elif isinstance(node, MappingNode):\n return fix_keywords({\n self.construct_object(k, deep=True): v\n for k, v in node.value\n })\n","repo_name":"kaiser101/Packages","sub_path":"Packages/yaml_macros_engine/st3/yamlmacros/src/custom_constructor.py","file_name":"custom_constructor.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"998734618","text":"from pstats import SortKey\nimport pygame, random, sys, time, button\nfrom pygame.locals import *\n\ndef main():\n pygame.init()\n\n #############################\n # INICIALIZACIÓN VARIABLES #\n ############################\n\n #Ajustes de pantalla del juego:\n\n size = (1000, 600)\n\n screen = pygame.display.set_mode(size)\n\n clock = pygame.time.Clock() #Tener control de los frames\n\n #Definimos colores y fuentes utilizadas:\n\n fuente=pygame.font.SysFont(\"arialblack\",100)\n\n fuente2=pygame.font.SysFont(\"arialblack\",50)\n\n BLACK = (0,0,0)\n\n WHITE= (255,255,255)\n\n RED = (255,0,0)\n\n #Imagenes necesarias:\n\n background = pygame.image.load(\"imagenes/backgala.webp\").convert()\n\n background_menuInicio = pygame.image.load(\"imagenes/galaxy.webp\").convert()\n\n play_img= pygame.image.load(\"imagenes/play.webp\").convert_alpha()\n width = play_img.get_rect().width\n height = play_img.get_rect().height\n play_img = pygame.transform.scale(play_img, (width-500, height-100))\n\n start_img = pygame.image.load('imagenes/play.webp').convert_alpha()\n\n controles_img = pygame.image.load('imagenes/ajustes.webp').convert_alpha()\n\n return_img = pygame.image.load('imagenes/return.webp').convert_alpha()\n\n abandonar_img = pygame.image.load('imagenes/abandonar.webp').convert_alpha()\n\n play_again_img = pygame.image.load('imagenes/jugarotravez.webp').convert_alpha()\n\n\n\n #Botones necesarios:\n\n start_button = button.Button(480, 200, start_img, 0.05)\n\n ajustes_button = button.Button(475, 275, controles_img, 0.05)\n\n return_button = button.Button(940, 540, return_img, 0.05)\n\n abandonar_button = button.Button(930, 526, abandonar_img, 0.2)\n\n play_again_button = button.Button(400, 526, play_again_img, 0.3)\n\n #Variables para la creacion del TEXTBOX:\n\n font = pygame.font.Font(None, 32)\n\n clock = pygame.time.Clock()\n\n input_box = pygame.Rect(425, 150, 140, 
32)\n\n color_inactive = pygame.Color(BLACK)\n\n color_active = pygame.Color('dodgerblue2')\n\n color = color_inactive\n\n active = False\n\n text = ''\n\n done = False\n\n\n #Creamos las clases basicas(Meteoritos y Nave)\n\n class Meteorito(pygame.sprite.Sprite):\n\n\n def __init__(self):\n super().__init__()\n self.image = pygame.image.load(\"imagenes/meteorito.png\").convert()\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()#Guardar posición\n \n\n def update(self,choque):\n if choque==True:\n self.kill()\t \n self.rect.y +=1 \n if self.rect.y > 600 :\n self.rect.y = -5\n self.rect.x = random.randrange(1000)\n \n class Nave(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n self.image = pygame.image.load(\"imagenes/playerN.png\").convert()\n self.imageC = pygame.image.load(\"imagenes/corazon.png\").convert()\n self.imageC.set_colorkey(BLACK)\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n\n\n #Preparamos los Sprites creados para luego controlarlos:\n\n ListaMeteoritos = pygame.sprite.Group()\n\n TodosSprite = pygame.sprite.Group()\n\n nave = Nave()\n\n TodosSprite.add(nave)\n\n\n #Coordenadas iniciales de la nave\n coord_x = 500\n coord_y = 500\n #Variables del control de movimiento\n x_speed = 0\n y_speed = 0\n\n\n\n #Creamos todos los meteoritos\n\n def crear_meteoritos(aviso):\n if aviso == True:\n meteorito=Meteorito()\n meteorito.rect.x = random.randrange(1000)\n meteorito.rect.y = random.randrange(100)\n ListaMeteoritos.add(meteorito)\n TodosSprite.add(meteorito)\n else:\n time.sleep(0.6)\n for i in range(5):\n \n meteorito = Meteorito()\n meteorito.rect.x = random.randrange(1000)\n meteorito.rect.y = random.randrange(100)\n ListaMeteoritos.add(meteorito)\n TodosSprite.add(meteorito)\n \n\n # Definimos el movimiento de la nave a traves del teclado:\n\n def movimiento_teclado(event,x_speed,y_speed):\n\n if event.type == pygame.KEYDOWN:\n\n if event.key == pygame.K_a:\n x_speed = -3\n if event.key == pygame.K_d:\n x_speed = 3\n if event.key == pygame.K_w:\n y_speed = -3\n if event.key == pygame.K_s:\n y_speed = 3\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_a:\n x_speed = 0\n if event.key == pygame.K_d:\n x_speed = 0\n if event.key == pygame.K_w:\n y_speed = 0\n if event.key == pygame.K_s:\n y_speed = 0\n\n return [x_speed,y_speed]\n\n\n #MOstrar puntos en pantalla del juego:\n\n fuentec=pygame.font.match_font(\"consolas\")\n\n def texto_pantalla(pantalla, fuente, texto, color, dimensiones):\n\n font= pygame.font.Font(fuente, dimensiones)\n superficie= font.render(texto, True, color)\n rectangulo= superficie.get_rect()\n pantalla.blit(superficie, rectangulo)\n\n\n #Variables para el control de respawn de meteoritos y control de colisiones\n Choque=False\n tiempoSinChoque=0\n avisoTiempoSinChoque=False\n crear_meteoritos(avisoTiempoSinChoque)\n\n\n\n\n puntos=0\n ranking=[]\n juego_en_pausa = False\n game_mode= \"menuInicio\"\n VIDA=3\n fin_juego=False\n\n while not fin_juego:\n\n #Control pantalla CONTROLES\n\n if game_mode== \"controles\":\n\n screen.blit(background_menuInicio, [0, 0])\n screen.blit(fuente.render(\"CONTROLES \", True,WHITE ), (300, 50))\n screen.blit(fuente2.render(\"Desplazamiento Derecha: d\", True,WHITE ), (5, 130))\n screen.blit(fuente2.render(\"Desplazamiento Izquierda: a \", True,WHITE ), (5, 200))\n screen.blit(fuente2.render(\"Desplazamiento Frontal: w \", True,WHITE ), (5, 270))\n screen.blit(fuente2.render(\"Desplazamiento Trasero: s \", True,WHITE ), (5, 330))\n 
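            # Draw the remaining control hint (pause key) below the movement-key lines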
screen.blit(fuente2.render(\"Menu de Pausa: p \", True,WHITE ), (5, 400))\n\n if return_button.draw(screen):\n game_mode=\"menuInicio\"\n\n for evento in pygame.event.get():\n\n if evento.type== pygame.QUIT:\n sys.exit()\n pygame.display.update()\n\n\n #Control pantalla Menu Inicial\n\n if game_mode == \"menuInicio\":\n\n screen.blit(background_menuInicio, [0, 0])\n screen.blit(fuente.render(\"CHOQUE ESPACIAL\", True,WHITE ), (150, 50))\n screen.blit(fuente2.render(\"Nombre: \", True,BLACK ), (275, 150))\n\n if start_button.draw(screen):\n game_mode=\"jugando\"\n\n if ajustes_button.draw(screen):\n game_mode=\"controles\"\n\n for evento in pygame.event.get():\n\n if evento.type== pygame.QUIT:\n sys.exit()\n\n #CONTROL DEL TEXTBOX \n if evento.type == pygame.MOUSEBUTTONDOWN:\n \n if input_box.collidepoint(evento.pos):\n \n active = not active\n else:\n active = False\n \n color = color_active if active else color_inactive\n if evento.type == pygame.KEYDOWN:\n if active:\n if evento.key == pygame.K_RETURN:\n text = ''\n elif evento.key == pygame.K_BACKSPACE:\n text = text[:-1]\n else:\n text += evento.unicode\n txt_surface = font.render(text, True, color)\n width = max(200, txt_surface.get_width()+10)\n input_box.w = width\n screen.blit(txt_surface, (input_box.x+5, input_box.y+5))\n pygame.draw.rect(screen, color, input_box, 2)\n\n pygame.display.update()\n\n #Control del modo pausa\n\n elif game_mode == \"partida_pausada\": \n reactivar = False\n while reactivar == False:\n screen.blit(fuente.render(\"JUEGO EN PAUSA\", True,WHITE ), (160, 250))\n pygame.display.flip()\n for evento in pygame.event.get():\n if evento.type== pygame.QUIT:\n sys.exit()\n if evento.type == pygame.KEYDOWN:\n if evento.key == pygame.K_p:\n game_mode = \"jugando\"\n reactivar=True\n pygame.display.flip()\n \n #Control del modo jugando\n\n elif game_mode == \"jugando\":\n \n if tiempoSinChoque == 1000:#subir de dificultad periodicamente\n screen.blit(fuente.render(\"Puntuacion\", True,WHITE ), (275, 50))\n puntos+=1\n avisoTiempoSinChoque=True\n crear_meteoritos(avisoTiempoSinChoque)\n tiempoSinChoque=0\n avisoTiempoSinChoque=False\n\n for evento in pygame.event.get():\n if evento.type == pygame.QUIT:\n sys.exit()\n elif evento.type == pygame.KEYDOWN:\n\n if evento.key == pygame.K_p:\n \n game_mode = \"partida_pausada\"\n\n x_speed=movimiento_teclado(evento,x_speed,y_speed)[0]\n y_speed=movimiento_teclado(evento,x_speed,y_speed)[1]\n \n screen.blit(background, [0, 0])\n\n #AÑADIMOS LAS IMAGENES DE CORAZONES DEPENDIENDO DE LAS VIDAS\n if VIDA==3:\n screen.blit(pygame.transform.scale(nave.imageC,(25,25)),(510,15))\n screen.blit(pygame.transform.scale(nave.imageC,(25,25)),(550,15))\n screen.blit(pygame.transform.scale(nave.imageC,(25,25)),(470,15))\n elif VIDA==2:\n screen.blit(pygame.transform.scale(nave.imageC,(25,25)),(510,15)) \n screen.blit(pygame.transform.scale(nave.imageC,(25,25)),(550,15))\n elif VIDA==1: \n screen.blit(pygame.transform.scale(nave.imageC,(25,25)),(550,15))\n \n TodosSprite.update(Choque) \n coord_x += x_speed\n coord_y += y_speed\n nave.rect.x = coord_x\n nave.rect.y = coord_y\n numcolisiones=pygame.sprite.spritecollide(nave, ListaMeteoritos, True)\n\n for colisiones in numcolisiones:\n VIDA -=1 \n Choque =True\n avisoTiempoSinChoque=False\n TodosSprite.update(Choque)\n crear_meteoritos(avisoTiempoSinChoque)\n coord_x = 500\n coord_y = 500\n time.sleep(0.10)\n l=[]\n \n if VIDA == 0:\n tiempoSinChoque=0\n user_punt=(puntos, text)\n ranking.append(user_punt) \n game_mode=\"menu_fin\"\n\n 
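# (annotation) The collision step above relies on pygame.sprite.spritecollide,
# which returns every sprite in the group whose rect overlaps the given
# sprite and, with dokill=True, also removes those sprites from their groups.
# A self-contained check of that behaviour:
import pygame

pygame.init()
player = pygame.sprite.Sprite()
player.rect = pygame.Rect(0, 0, 10, 10)
rocks = pygame.sprite.Group()
rock = pygame.sprite.Sprite()
rock.rect = pygame.Rect(5, 5, 10, 10)  # overlaps the player's rect
rocks.add(rock)
hits = pygame.sprite.spritecollide(player, rocks, True)
assert len(hits) == 1 and len(rocks) == 0  # hit returned and killed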
Choque=False\n TodosSprite.draw(screen)\n\n ###DELIMITAMOS BORDES###\n if coord_x > 904 :\n coord_x=904\n elif coord_x <= 0:\n coord_x=0 \n if coord_y > 526 :\n coord_y=526\n elif coord_y <= 0:\n coord_y=0 \n\n texto_pantalla(screen, fuentec, str(puntos),RED,40) \n pygame.display.flip()\n tiempoSinChoque+=1\n \n\n clock.tick(270) #Frames por segundo \n\n #Control menu fin\n\n elif game_mode == \"menu_fin\":\n screen.blit(background_menuInicio, [0, 0])\n screen.blit(fuente.render(\"GAME OVER\", True,WHITE ), (275, 50))\n i=0\n a=300\n while i definition should have at least one index\"\n )\n","repo_name":"gabrielfalcao/plural","sub_path":"tests/functional/test_edge_definition.py","file_name":"test_edge_definition.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"27105079627","text":"from collections import defaultdict\n\nc = defaultdict(int)\n\nn = int(input())\nfor i in range(n):\n x = int(input())\n c[x] += 1\nans = 0\nfor k, v in c.items():\n if v % 2 == 1:\n ans += 1\nprint(ans)","repo_name":"silphire/atcoder","sub_path":"abc073/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17483205258","text":"from django.db import models\nfrom django.forms import ModelForm\n\nclass Offer(models.Model):\n\t#name = models.CharField(max_length = 255)\n\temail = models.EmailField(null=False)\n\tmessage = models.TextField(max_length = 2048, null=False)\n\n\tdef __unicode__(self):\n\t\treturn self.email\n\nclass OfferForm(ModelForm):\n class Meta:\n model = Offer","repo_name":"zheli/django-domain-for-sale-page","sub_path":"domain_for_sale/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11660025155","text":"import cv2\nimport mediapipe as mp\n\nimport constants as c\n\n\nclass LandmarkDetector:\n \"\"\"\n Wrapper class that handles retrieving landmarks through the mediapipe library.\n \"\"\"\n def __init__(self, mode=False, max_hands=2, detection_confidence=0.5, track_confidence=0.5):\n \"\"\"\n Constructor for the LandmarkDetector class\n :param mode: False for video feed, true for image feed\n :param max_hands: max number of hands to run detection\n :param detection_confidence: confidence in a hand identified (0 to 1)\n :param track_confidence: confidence in a hand identified (0 to 1)\n \"\"\"\n self.mode = mode\n self.maxHands = max_hands\n self.detectionCon = detection_confidence\n self.trackCon = track_confidence\n\n self.mpHands = mp.solutions.hands\n self.hands = self.mpHands.Hands(static_image_mode=self.mode,\n max_num_hands=self.maxHands,\n min_detection_confidence=self.detectionCon,\n min_tracking_confidence=self.trackCon)\n self.mpDraw = mp.solutions.drawing_utils\n\n def find_hands(self, img, draw=True):\n \"\"\"\n This function attempts to find hands in the given image. 
It will return an image with a bounded box around it.\n If draw is true, the landmarks will also be drawn.\n :param img: the input image\n :param draw: if true, draw the landmarks.\n :return: img with bounded box and/or landmarks drawn if detected.\n \"\"\"\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n self.results = self.hands.process(imgRGB)\n # print(self.results.multi_hand_landmarks)\n\n if self.results.multi_hand_landmarks:\n for handLms in self.results.multi_hand_landmarks:\n if draw:\n self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)\n\n # print(handLms)\n pts = self.get_bbox_coordinates(handLms, img.shape)\n\n cv2.rectangle(img, [pts[0], pts[1]], [pts[2], pts[3]], (255, 0, 255))\n\n return img\n\n def crop_hands(self, img):\n \"\"\"\n With the given image, the function will create a cropped image of the hand with equal sides\n :param img: the given image\n :return: the cropped hand if detected. none if not\n \"\"\"\n if self.results.multi_hand_landmarks:\n for handLms in self.results.multi_hand_landmarks:\n\n pts = self.get_bbox_coordinates(handLms, img.shape)\n width = pts[2] - pts[0]\n length = pts[3] - pts[1]\n l = length // 2\n if width >= length:\n l = width // 2\n\n center_x, center_y = (pts[2] + pts[0]) // 2, (pts[3] + pts[1]) // 2\n\n cropped_image = img[(center_y - l):(center_y + l), (center_x - l):(center_x + l)]\n return cropped_image\n else:\n return None\n\n def get_points(self, img, handNo=0, is_flatten=False):\n \"\"\"\n Given an image and the number of hands in the image, this function will check for a hand detection and return\n a list of landmark points. There are 21 landmark points from 0 to 20. The landmark points are normalized and\n stored in tuple with the respective indices refer to each landmark points\n :param img:\n :param handNo:\n :return:\n \"\"\"\n\n lst = []\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n self.results = self.hands.process(imgRGB)\n norm = 0\n if self.results.multi_hand_landmarks:\n myHand = self.results.multi_hand_landmarks[handNo]\n for id, lm in enumerate(myHand.landmark):\n if id == 0:\n ref_pt = lm\n\n # print(id)\n # print(lm)\n pt = [lm.x - ref_pt.x, lm.y - ref_pt.y, lm.z - ref_pt.z]\n\n norm = max(norm, abs(min(pt)), max(pt))\n\n lst = lst + pt\n lst = [x / norm for x in lst] # final step of normalization\n if is_flatten:\n return lst\n if (len(lst) == 0):\n return lst\n\n nested = []\n j = 0\n for i in range(len(lst)):\n if j == 0:\n pt = [lst[i]]\n j += 1\n continue\n pt.append(lst[i])\n\n if j == 2:\n nested.append(pt)\n j = 0\n continue\n\n j += 1\n\n return nested\n\n def get_bbox_coordinates(self, hand_ladmark, image_shape):\n \"\"\"\n Get bounding box coordinates for a hand landmark.\n Args:\n hand_ladmark: A HandLandmark object.\n image_shape: A tuple of the form (height, width).\n Returns:\n A tuple of the form (xmin, ymin, xmax, ymax).\n \"\"\"\n all_x, all_y = [], [] # store all x and y points in list\n for hnd in self.mpHands.HandLandmark:\n all_x.append(int(hand_ladmark.landmark[hnd].x * image_shape[1])) # multiply x by image width\n all_y.append(int(hand_ladmark.landmark[hnd].y * image_shape[0])) # multiply y by image height\n\n return min(all_x) - c.PADDING, min(all_y) - c.PADDING, max(all_x) + c.PADDING, max(all_y) + c.PADDING\n # return as (xmin, ymin, xmax, 
ymax)\n","repo_name":"jgliao248/Hand_Sign_Detection_NN","sub_path":"LandmarkDetector.py","file_name":"LandmarkDetector.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35156182372","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 1 14:17:50 2017\n\n@author: ipingou\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 1 11:18:59 2017\n\n@author: ipingou\n\"\"\"\nimport sys\nimport numpy as np\nimport math\ninput_train=sys.argv[1]\ninput_test=sys.argv[2]\n\ndef preprocess(file):\n data = np.loadtxt(file)\n features = data.shape[1]\n X = data[:,0:features-1]\n X = np.insert(X,0,1,axis=1)\n Y = data[:,-1]\n Y=Y.reshape(len(Y),1)\n return [X,Y]\n\n\ndef error(w,X,Y):\n predict = np.sign(sigmoid(np.dot(X,w))-0.5) \n #print(predict)\n return (1/X.shape[0])*sum(predict!=Y) \n \n[X,Y]=preprocess(input_train)\n[X_test, Y_test] = preprocess(input_test)\n\ndef sigmoid_element(x):\n return 1/(1+math.exp(-1*x)) \n\nsigmoid = np.vectorize(sigmoid_element)\n\ndef gradient(x,y,w):\n wt = np.transpose(w)\n return (sigmoid(-1*y*np.dot(wt,x))*-1*y*x).reshape(len(w),1)\n \ndef error(w,X,Y):\n predict = np.sign(sigmoid(np.dot(X,w))-0.5) \n #print(predict)\n return (1/X.shape[0])*sum(predict!=Y) \n \nw = np.zeros((X.shape[1],1))\nstep=0.001\nn=0\nfor i in range(2000):\n n = n%X.shape[0]\n x=np.transpose(X[n])\n y=Y[n] \n w = w-step*(gradient(x,y,w))\n #print(i)\n n=n+1\n \nprint(w)\n\nprint(\"Ein: \" ,error(w,X_test,Y_test))\nprint (\"Eout: \",error(w,X,Y))\n\n#[[ 0.01826899]\n #[-0.01308051]\n #[ 0.04072894]\n #[-0.03295698]\n #[ 0.01498363]\n #[-0.03691042]\n #[ 0.01232819]\n #[ 0.04791334]\n #[-0.02244958]\n #[ 0.02470544]\n #[ 0.06878235]\n #[ 0.01897378]\n #[-0.02032107]\n #[-0.00901469]\n #[ 0.04589259]\n #[ 0.05776824]\n #[ 0.06102487]\n #[-0.04756147]\n #[ 0.06035018]\n #[-0.01660574]\n #[-0.03509342]]","repo_name":"o20021106/machineLearningHW","sub_path":"machineLearningFoundation/hw3/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18694272765","text":"import tkinter as tk\n\nclass Tela:\n #somar nao funciona, mas o senhor nao especificou se tinha que funcionar ne :)\n def somar(self):\n self.result.set(self.num1.get() + self.num2.get())\n\n def __init__(self, master):\n\n self.janela = master\n self.janela.title(\"Soma simples\")\n self.janela.geometry(\"200x100\")\n\n self.num1 = tk.IntVar()\n self.num2 = tk.IntVar()\n self.result = tk.StringVar()\n \n \n #lbl e field para valores da soma\n lbl1 = tk.Label(self.janela, text='Número 1: ').grid(row=0, column=0)\n entry1 = tk.Entry(self.janela, textvariable=self.num1).grid(row=0, column=1)\n lbl2 = tk.Label(self.janela, text='Número 2: ').grid(row=1, column=0)\n entry1 = tk.Entry(self.janela, textvariable=self.num2).grid(row=1, column=1)\n\n\n #botao de enviar\n btn1 = tk.Button(self.janela, text='Somar >> ', width=10, command=self.somar).grid(row=2, column=0, columnspan=2, padx=10, pady=5, sticky=tk.W)\n\n #field com resultado\n \n lbl3 = tk.Label(self.janela, textvariable=self.result, bg='white', width=12).grid(row=2, column=1, padx=32, pady=5, columnspan=2)\n \n \n\nmaster = tk.Tk()\napp = 
Tela(master)\nmaster.mainloop()","repo_name":"nawak-otak/ufac_py","sub_path":"soma_simples.py","file_name":"soma_simples.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73489911624","text":"import subprocess\n\nclass AndroidOrchestrator: \n DEVICE_NAME=\"Pixel3\"\n EMULATOR_PATH=\"/Users/jjj/Library/Android/sdk/tools/emulator\"\n ANDROID_SIMULATOR_START_COMMAND = \"{} -avd {}\".format(EMULATOR_PATH,DEVICE_NAME) \n ANDROID_SIMULATOR_STOP_COMMAND=\"adb kill-server\" \n NVM_SWITCH_COMMAND = \"nvm use 16\"\n ANDROID_WDIO_DIR_PATH=\"/Users/jjj/Desktop/SourceCode/webdriverio\"\n RUN_TESTS_COMMAND=\"npm run androidApp\"\n NVM_INITIALIZER = \"source ~/.nvm/nvm.sh\"\n\n\n def __init__(self) -> None:\n pass\n\n def __start__android__simulator__(self):\n print(\"Starting Android Simulator....\")\n start_android = subprocess.Popen(AndroidOrchestrator.ANDROID_SIMULATOR_START_COMMAND,shell=True)\n start_android.wait()\n print(\"Android simulator successfully started.\")\n\n\n def __stop__android__simulator__(self):\n print(\"Shutting down all Android simulators....\")\n stop_android = subprocess.Popen(AndroidOrchestrator.ANDROID_SIMULATOR_STOP_COMMAND,shell=True)\n stop_android.wait()\n print(\"Android simulator successfully shut down\")\n \n def execute_tests(self):\n self.__start__android__simulator__()\n command_for_tests = \"{} && {} && cd {} && {}\".format(AndroidOrchestrator.NVM_INITIALIZER,AndroidOrchestrator.NVM_SWITCH_COMMAND,AndroidOrchestrator.ANDROID_WDIO_DIR_PATH,AndroidOrchestrator.RUN_TESTS_COMMAND)\n print(\"starting execution of test cases...\") \n test_executor = subprocess.Popen(command_for_tests,shell=True,executable='/bin/zsh')\n exit_code=test_executor.wait()\n print(\"{} is the exit code for the test process\".format(exit_code))\n print(\"done executing test cases.\")\n self.__stop__android__simulator__()\n","repo_name":"jithinjosejacob/mobileTestOrchestrator","sub_path":"Android/AndroidOrchestrator.py","file_name":"AndroidOrchestrator.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11020372744","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy.stats as stats\nimport logging\n\nclass EDA:\n \n def __init__(self):\n logging.basicConfig(filename=\"../logs/keep_track.log\", level=logging.INFO, format=\"time: %(asctime)s, function: %(funcName)s, module: %(name)s, message: %(message)s \\n\")\n \n def unique_col(self, df):\n \"\"\"\n A function to return unique columns\n \"\"\"\n logging.info(\"successfully returned unique cols\")\n return(df.apply(lambda x: len(x.unique())).sort_values(ascending=False).head(10))\n \n def duplicate(self, df):\n \"\"\"\n A function to return duplicates\n \"\"\"\n dups = df.duplicated()\n print(\"There are duplicates: {}\".format(dups.any()))\n logging.info(\"successfully returned duplicates\")\n return(df[dups])\n \n def df_info(self, df):\n \"\"\"\n A function to return dataset info\n \"\"\"\n logging.info(\"successfully displayed info\")\n return (df.describe().T.style.bar(subset=['mean'], color='#205ff2').background_gradient(subset=['std'], cmap='Reds').background_gradient(subset=['50%'], cmap='coolwarm'))\n \n def plot_graph(self, x, data):\n \"\"\"\n A function to plot bargraph\n \"\"\"\n logging.info(\"successfully plotted\")\n plt.figure(figsize=(12, 6))\n 
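# (annotation) The AndroidOrchestrator record above chains shell steps with
# '&&' through subprocess.Popen(shell=True) followed by wait(). A hedged
# sketch of the same idea using subprocess.run, which blocks and raises on
# failure when check=True (the command strings are illustrative placeholders):
import subprocess

def run_in_zsh(*steps):
    script = " && ".join(steps)  # a failing step short-circuits the rest
    return subprocess.run(script, shell=True, executable="/bin/zsh", check=True)

# run_in_zsh("source ~/.nvm/nvm.sh", "nvm use 16", "npm run androidApp")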
sns.countplot(x=x, data=data, palette='rocket')\n \n def plot_violin(self, df, y):\n \"\"\"\n A function to plot violin plot\n \"\"\"\n df_n_2 = (df - df.mean()) / (df.std())\n df = pd.concat([y,df_n_2.iloc[:,0:15]],axis=1)\n df = pd.melt(df,id_vars=\"diagnosis\", var_name=\"features\", value_name=\"value\")\n\n plt.figure(figsize=(10,10))\n sns.violinplot(x=\"features\", y=\"value\", hue=\"diagnosis\", data=df,split=True, inner=\"quart\",palette =\"Set2\")\n plt.xticks(rotation=90)\n\n df = pd.concat([y,df_n_2.iloc[:,15:30]],axis=1)\n df = pd.melt(df,id_vars=\"diagnosis\", var_name=\"features\", value_name='value')\n plt.figure(figsize=(10,10))\n sns.violinplot(x=\"features\", y=\"value\", hue=\"diagnosis\", data=df,split=True, inner=\"quart\",palette =\"Set2\")\n plt.xticks(rotation=90)\n logging.info(\"succesfully ploted violin plot\")\n \n def check_outliers(self, df, y):\n \"\"\"\n A function to check outliers\n \"\"\"\n df_std = (df - df.mean()) / (df.std()) \n df = pd.concat([y,df_std.iloc[:,0:10]],axis=1)\n df = pd.melt(df,id_vars=\"diagnosis\", var_name=\"features\", value_name='value')\n plt.figure(figsize=(17,5))\n sns.boxplot(x=\"features\", y=\"value\", hue=\"diagnosis\", data=df)\n\n df = pd.concat([y,df_std.iloc[:,10:20]],axis=1)\n df = pd.melt(df,id_vars=\"diagnosis\", var_name=\"features\", value_name='value')\n plt.figure(figsize=(17,5))\n sns.boxplot(x=\"features\", y=\"value\", hue=\"diagnosis\", data=df)\n\n df = pd.concat([y,df_std.iloc[:,20:30]],axis=1)\n df = pd.melt(df,id_vars=\"diagnosis\", var_name=\"features\", value_name='value')\n plt.figure(figsize=(20,5))\n sns.boxplot(x=\"features\", y=\"value\", hue=\"diagnosis\", data=df)\n logging.info(\"plot outliers\")\n \n def joint_plot(self, df, col1, col2):\n \"\"\"\n A function to plot joint plot\n \"\"\"\n sns.set(style=\"white\", color_codes=True)\n jp=sns.jointplot(df.loc[:,col1], df.loc[:,col2], kind=\"reg\",color=\"b\")\n r, p = stats.pearsonr(df.loc[:,col1], df.loc[:,col2])\n jp.ax_joint.annotate(f'$\\\\rho = {r:.3f}, p = {p:.3f}$',\n xy=(0.1, 0.9), xycoords='axes fraction',\n ha='left', va='center',\n bbox={'boxstyle': 'round', 'fc': 'powderblue', 'ec': 'navy'})\n logging.info(\"successfully plotted joint plot\")\n \n\n def corr(self, x, y, **kwargs):\n \"\"\"\n Function to calculate correlation coefficient between two arrays\n \"\"\"\n # Calculate the value\n coef = np.corrcoef(x, y)[0][1]\n # Make the label\n label = r'$\\rho$ = ' + str(round(coef, 2))\n\n # Add the label to the plot\n ax = plt.gca()\n ax.annotate(label, xy = (0.2, 0.95), size = 11, xycoords = ax.transAxes)\n logging.info(\"successfully created correlation function\")\n \n \n def plot_heatmap(self, df):\n \"\"\"\n Function to plot heatmap\n \"\"\"\n f, ax = plt.subplots(figsize = ( 12, 10))\n sns.heatmap( df.corr(), annot = True, linewidth = 0.5, fmt = '.1f', ax = ax )\n logging.info(\"successfully plotted heatmap\")\n \n \nif __name__==\"__main__\":\n eda = EDA()\n\n ","repo_name":"sel6/causal_inference","sub_path":"scripts/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9443793160","text":"from utils import mask_card, format_date, filter_data_by_executed, get_all_operations, formatted_data\n\n\ndef test_mask_card():\n assert mask_card(\"Visa Classic 6831982476737658\") == 'Visa Classic 6831 98** **** 7658'\n assert mask_card(\"Счет 35383033474447895560\") == \"Счет **5560\"\n\n\ndef test_formate_data():\n 
assert format_date(\"2019-04-04T23:20:05.206878\") == \"04.04.2019\"\n\n\ndef test_filter_data_by_executed():\n data_for_test = [\n {\n 'id': 1,\n 'state': 'EXECUTED'\n },\n {\n 'id': 2,\n 'state': 'CANCELED'\n },\n {\n 'id': 3,\n 'state': 'EXECUTED'\n }\n ]\n expected_response = [\n {\n 'id': 1,\n 'state': 'EXECUTED'\n },\n {\n 'id': 3,\n 'state': 'EXECUTED'\n }\n ]\n assert filter_data_by_executed(data_for_test) == expected_response\n\n\ndef test_formatted_data():\n data_for_test = {\n \"id\": 441945886,\n \"date\": \"2019-08-26T10:50:58.294041\",\n \"operationAmount\": {\n \"amount\": \"31957.58\",\n \"currency\": {\n \"name\": \"руб.\",\n \"code\": \"RUB\"\n }\n },\n \"description\": \"Перевод организации\",\n \"from\": \"Maestro 1596837868705199\",\n \"to\": \"Счет 64686473678894779589\"\n }\n expected_response = ('26.08.2019 Перевод организации\\n'\n 'Maestro 1596 83** **** 5199 -> Счет **9589\\n'\n '31957.58 руб.\\n')\n assert formatted_data(data_for_test) == expected_response\n\n\n# def test_get_all_operations():\n# assert type(get_all_operations()) == str\n","repo_name":"BadDrummer/Course_work_3","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44073973535","text":"class Solution(object):\n def shiftGrid(self, grid, k):\n \"\"\"\n :type grid: List[List[int]]\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n m = len(grid)\n n = len(grid[0])\n \n nums = [num for row in grid for num in row]\n start_idx = (m*n) - k % (m*n)\n nums = nums[start_idx:] + nums[:start_idx]\n res = []\n a = iter(nums)\n\n for _ in range(m):\n tmp = []\n for _ in range(n):\n tmp.append(next(a))\n res.append(tmp)\n \n return res","repo_name":"ericcheng09/LeetCodeSolution_Python","sub_path":"Scripts/1260.py","file_name":"1260.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73591110345","text":"# -------------------------------------------------------------\n# 限定:父元素的第n个子节点 :nth-child(n) 正 nth-last-child(n) 倒\n\nfrom selenium import webdriver\nimport time\nwd = webdriver.Chrome(r'd:\\github\\chromedriver.exe')\nwd.implicitly_wait(5)\n\nwd.get('http://cdn1.python3.vip/files/selenium/sample1b.html')\n\nelement = wd.find_elements_by_css_selector(\n 'span:nth-child(2)') # 格式 / 限定:是处在位置为第二个的span元素\nfor ele in element:\n print(ele.get_attribute('outerHTML'))\n\n\n# -----------------------------------------------------------------\n# 父元素的第几个某类型的子节点 :nth-of-type(n) 正 :nth-last-of-type(n) 倒 以类型来区别\n# span:nth-of-type(3) 属于span类型的第三个 区别于子节点 子节点需确定在整个中span的位置 而 type不用\n\nelement = wd.find_elements_by_css_selector('span:nth-of-type(2)')\nfor ele in element:\n print(ele.get_attribute('outerHTML'))\n\n# ---------------------------------------------------------------------\n# 奇偶节点 :nth-child(odd) 奇 :nth-child(even) 偶 无关类型 只管所在位置\n# ---------------------------------------------------------------------\n# 兄弟节点\n# 相邻兄弟节点:+ span + p span后相邻的p节点\n# 之后所有节点 ~ span ~ p span后的所有的p节点\nelement = wd.find_elements_by_css_selector('#t1 > span ~ p')\nfor ele in element:\n print(ele.text)\n","repo_name":"asherboy1/huhu-selenium","sub_path":"css2.py","file_name":"css2.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13250215883","text":"from kombu import Connection\nfrom 
sqlalchemy import create_engine\n\nfrom classic.messaging_kombu import KombuPublisher\nfrom classic.sql_storage import TransactionContext\n\nfrom book_service.adapters import books_api, database, message_bus\nfrom book_service.application import services\n\n\nclass Settings:\n db = database.Settings()\n books_api = books_api.Settings()\n message_bus = message_bus.Settings()\n\n\nclass DB:\n engine = create_engine(Settings.db.DB_URL)\n database.metadata.create_all(engine)\n\n context = TransactionContext(bind=engine)\n\n books_repo = database.repositories.BooksRepo(context=context)\n\n\nclass PublisherMessageBus:\n connection = Connection(Settings.message_bus.BROKER_URL)\n message_bus.broker_scheme.declare(connection)\n\n publisher = KombuPublisher(\n connection=connection,\n scheme=message_bus.broker_scheme,\n )\n\n\nclass Application:\n books = services.BookService(\n books_repo=DB.books_repo, publisher=PublisherMessageBus.publisher\n )\n is_dev_mode = Settings.books_api.IS_DEV_MODE\n\n\nclass ConsumerMessageBus:\n consumer = message_bus.create_consumer(\n PublisherMessageBus.connection, Application.books\n )\n\n @staticmethod\n def declare_scheme():\n message_bus.broker_scheme.declare(PublisherMessageBus.connection)\n\n\nclass Aspects:\n services.join_points.join(DB.context)\n books_api.join_points.join(PublisherMessageBus.publisher, DB.context)\n\n\napp = books_api.create_app(\n books=Application.books, is_dev_mode=Application.is_dev_mode\n)\n\nif __name__ == \"__main__\":\n from wsgiref import simple_server\n\n with simple_server.make_server('', 8000, app=app) as server:\n server.serve_forever()\n\n # hupper - m\n # waitress - -port = 8000 - -host = 127.0\n # .0\n # .1\n # user_service.composites.users_api: app\n","repo_name":"RewCrew/pet_project_private_library","sub_path":"book_service/composites/app_api.py","file_name":"app_api.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4967278173","text":"import ProcessDataFunc\nimport numpy as np\nimport copy\n\n#INPUT:一个原始的schedule\n#OUTPUT:LSN后schedule的方差\n\n# 每个trip的开始和结束时间\nbegintime, endtime = ProcessDataFunc.TimeTable()\n# 每个trip的性质0上行 1下行\ntrip_a = ProcessDataFunc.Attribute()\n# 总任务数\nTotalTaskNum = len(begintime)\n# 邻接矩阵\nMatrix = ProcessDataFunc.GenerateMatrix()\n\n#确定切割顺序\ndef DecomposeRange(Tknum, Tmaxnum):\n DR = []\n middle = Tmaxnum // 2\n for i in range(middle, Tmaxnum - Tknum + 1):\n DR.append(i)\n DR.append(Tmaxnum - i)\n return DR\n\n#将班从大到小排序\ndef resortshift(schedule):\n n = schedule.DriverNum\n for i in range(n):\n for j in range(0, n-i-1):\n if schedule.DriverList[j].TaskNum < schedule.DriverList[j + 1].TaskNum:\n schedule.DriverList[j], schedule.DriverList[j + 1] = schedule.DriverList[j + 1], schedule.DriverList[j]\n return schedule\n\n#将该班分解为两个班\ndef Decompose(T, h):\n global begintime, endtime\n T.Task2 = T.Task1[h:]\n T.Task1 = T.Task1[:h]\n T.divide = 1\n T.TaskNum = len(T.Task1) + len(T.Task2)\n T.beginTime1 = begintime[T.Task1[0]]\n T.endTime1 = endtime[T.Task1[-1]]\n if T.Task2 == []:\n T.beginTime2 = 0\n T.endTime2 = 0\n T.divide = 0\n else:\n T.beginTime2 = begintime[T.Task2[0]]\n T.endTime2 = endtime[T.Task2[-1]]\n# 没有Swap成功再将其结合\ndef Compose(T):\n global begintime, endtime\n T.Task1 = T.Task1 + T.Task2\n T.beginTime1 = begintime[T.Task1[0]]\n T.endTime1 = endtime[T.Task1[-1]]\n T.Task2 = []\n T.beginTime2 = 0\n T.endTime2 = 0\n T.divide = 0\n T.TaskNum = len(T.Task1) + len(T.Task2)\n\n # 1 采取分班制\n # 2 
连续排班制\n# Tk和Tmax1进行加结合\ndef combineshift1(Tmax, Tk):\n global trip_a\n # Tmax = Tmax1 + Tmax2 ---》Tmax1 + Tk\n # Tk = Tk + Null ---》Tmax2 + Null\n #1 连接时间为1-15版本\n #if (Tmax.endTime1 + 1 < Tk.beginTime1) and (Tmax.endTime1 + 15 > Tk.beginTime1) and (trip_a[Tmax.Task1[-1]] != trip_a[Tk.Task1[0]]):#Tmax2和Tk互换 Tmax1 -》Tk\n #2 只要能连接上版本\n #if Tmax.endTime1 < Tk.beginTime1:\n #3 根据邻接矩阵连接版本\n if Matrix[Tmax.Task1[-1]][Tk.Task1[0]]==1:\n Tmax.Task2, Tk.Task1 = Tk.Task1, Tmax.Task2#Tmax2和Tk互换\n Tmax.divide = 1\n Tk.divide = 0\n return 1\n # Tmax = Tmax1 + Tmax2 ---》Tk + Tmax1\n # Tk = Tk + Null ---》Tmax2 + Null\n # 1 连接时间为1-15版本\n #elif (Tk.endTime1 + 1 < Tmax.beginTime1) and (Tk.endTime1 + 15 > Tmax.beginTime1) and (trip_a[Tmax.Task1[0]] != trip_a[Tk.Task1[-1]]):#Tmax2和Tk互换 Tk -》Tmax1\n # 2 只要能连接上版本\n #elif Tk.endTime1 < Tmax.beginTime1:\n # 3 根据邻接矩阵连接版本\n elif Matrix[Tk.Task1[-1]][Tmax.Task1[0]]==1:\n #先Tmax2 和Tk互换\n Tmax.Task2, Tk.Task1 = Tk.Task1, Tmax.Task2\n #再Tmax1和Tmax2互换\n Tmax.Task1, Tmax.Task2 = Tmax.Task2, Tmax.Task1\n Tmax.divide = 1\n Tk.divide = 0\n return 1\n else:\n return 0\n\n# Tk和Tmax2进行加结合\ndef combineshift2(Tmax, Tk):\n # Tmax = Tmax1 + Tmax2 ---》Tk + Tmax2\n # Tk = Tk + Null ---》Tmax1 + Null\n # 1 连接时间为1-15版本\n #if (Tk.endTime1 + 1 < Tmax.beginTime2) and (Tk.endTime1 + 15 > Tmax.beginTime2) and (trip_a[Tmax.Task2[0]] != trip_a[Tk.Task1[-1]]): # Tmax1和Tk互换 Tk -》Tmax2\n # 2 只要能连接上版本\n # if Tk.endTime1 < Tmax.beginTime2:\n # 3 根据邻接矩阵连接版本\n if Matrix[Tk.Task1[-1]][Tmax.Task2[0]]==1:\n Tmax.Task1, Tk.Task1 = Tk.Task1, Tmax.Task1 # Tmax1和Tk互换\n Tmax.divide = 1\n Tk.divide = 0\n return 1\n # Tmax = Tmax1 + Tmax2 ---》Tmax2 + Tk\n # Tk = Tk + Null ---》Tmax1 + Null\n # 1 连接时间为1-15版本\n #elif (Tmax.endTime2 + 1 < Tk.beginTime1) and (Tmax.endTime2 + 15 > Tk.beginTime1) and (trip_a[Tmax.Task2[-1]] != trip_a[Tk.Task1[0]]): # Tmax1和Tk互换 Tmax2 -》Tk\n # 2 只要能连接上版本\n #elif Tmax.endTime2 < Tk.beginTime1:\n # 3 根据邻接矩阵连接版本\n elif Matrix[Tmax.Task2[-1]][Tk.Task1[0]]==1:\n # 先Tmax1 和Tk互换\n Tmax.Task1, Tk.Task1 = Tk.Task1, Tmax.Task1 # Tmax1和Tk互换\n # 再Tmax1和Tmax2互换\n Tmax.Task1, Tmax.Task2 = Tmax.Task2, Tmax.Task1\n Tmax.divide = 1\n Tk.divide = 0\n return 1\n else:\n return 0\n\ndef SwapNeighbor(Tmax, Tk):\n Tmax1num = len(Tmax.Task1)\n Tmax2num = len(Tmax.Task2)\n if Tmax1num >= Tmax2num: #Tmax1大 优先选Tmax2与Tk结合\n if combineshift2(Tmax, Tk):\n return 1\n elif combineshift1(Tmax, Tk):\n return 1\n else:\n return 0\n else:\n if combineshift1(Tmax, Tk):\n return 1\n elif combineshift2(Tmax, Tk):\n return 1\n else:\n return 0\n\ndef Updateshift(schedule):\n global begintime, endtime\n schedule.DriverNum = len(schedule.DriverList)\n for i in range(schedule.DriverNum):\n T = schedule.DriverList[i]\n T.TaskNum = len(T.Task1) + len(T.Task2)\n if T.Task1 == [] and T.Task2 != []:\n T.Task1, T.Task2 = T.Task2, T.Task1\n T.divide = 0\n if T.Task2 == []:\n T.divide = 0\n T.beginTime1 = begintime[T.Task1[0]]\n T.endTime1 = endtime[T.Task1[-1]]\n if T.divide == 1:\n if T.Task2 == []:\n T.beginTime2 = 0\n T.endTime2 = 0\n else:\n T.beginTime2 = begintime[T.Task2[0]]\n T.endTime2 = endtime[T.Task2[-1]]\n for i in range(schedule.DriverNum):\n ftrip = schedule.DriverList[i].Task1[0]\n ltrip = schedule.DriverList[i].Task1[-1]\n start = begintime[ftrip]\n end = endtime[ltrip]\n schedule.DriverList[i].workinghours = end - start\n\n\n\n\ndef LSN(schedule, lvs_iter):\n # 更新shift 赋值begintime和endtime\n Updateshift(schedule)\n # 对其进行司机按照班次多少进行排序 多---》少\n resortshift(schedule)\n # 司机数保持不变\n n = schedule.DriverNum\n # 
计算swap前的方差\n\n workinghours=[]\n for i in range(n):\n workinghours.append(schedule.DriverList[i].workinghours)\n\n Variance1 = np.var(workinghours)\n Variance2 = np.var(workinghours)\n print('Driver NUm=', n)\n print('Before Swap:', 'VarianceBefore = ', Variance1)\n\n\n print('lvs_iter=',lvs_iter)\n lvs_iter= lvs_iter+1\n # for LVSiter in range(lvs_iter):\n while Variance1 >= Variance2:\n lvs_schedule = copy.deepcopy(schedule) #备份\n # Local Swap Neighbor\n breakflag = 1 # 判断有没有进行swap,要不要update 重新LSN\n while breakflag == 1:\n # 选取Tmax\n for i in range(n):\n resortshift(schedule)\n breakflag = 0#表示没有swap\n Tmax = schedule.DriverList[i]\n Tmaxnum = Tmax.TaskNum\n if Tmax.divide == 1 or Tmaxnum <= 1:\n continue\n else:\n # 选取Tk\n for j in range(n-1, -1, -1):\n Tk = schedule.DriverList[j]\n Tknum = Tk.TaskNum\n if Tk.divide == 1:\n continue\n else:\n if Tknum <= 0.5 * Tmaxnum:\n Drange = DecomposeRange(Tknum, Tmaxnum)\n for h in Drange:#h取值范围为Tk~Tmax-Tk\n Decompose(Tmax, h)\n if SwapNeighbor(Tmax, Tk) == 1:#swap成功\n Updateshift(schedule)\n breakflag = 1\n break\n else:\n Compose(Tmax)\n Updateshift(schedule)\n resortshift(schedule)\n if breakflag == 1:\n break\n if breakflag == 1:\n break\n\n workinghours = []\n for i in range(n):\n Compose(schedule.DriverList[i])\n stime = begintime[schedule.DriverList[i].Task1[0]]\n etime = endtime[schedule.DriverList[i].Task1[-1]]\n workinghours.append(etime-stime)\n VarianceAfter = np.var(workinghours)\n Updateshift(schedule)\n resortshift(schedule)\n Variance1 = Variance2 #lvs_schedule\n Variance2 = VarianceAfter #schedule\n print('-->',VarianceAfter,end='')\n if Variance1==Variance2:\n break\n\n print('After Swap:', 'VarianceAfter = ', Variance1)\n resortshift(lvs_schedule)\n\n\n return lvs_schedule\n","repo_name":"QINGYWuuu/GLVS","sub_path":"lvs.py","file_name":"lvs.py","file_ext":"py","file_size_in_byte":9010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36034085563","text":"#! 
/usr/bin/env python\n\nfrom numpy import angle\nimport rospy\nimport rospkg\nimport numpy as np\nimport sys\nfrom std_msgs.msg import Float64\nfrom morai_msgs.msg import EgoVehicleStatus\nfrom vesc_msgs.msg import VescStateStamped\n\nclass EgoReceiver():\n \n def __init__(self):\n\n rospy.init_node('WeWannaSpeed', anonymous=False)\n self.subEgo_speed = rospy.Subscriber(\"/sensors/core\", VescStateStamped, self.speed_callback)\n self.subEgo_angle = rospy.Subscriber(\"/sensors/servo_position_command\", Float64, self.angle_callback)\n self.subEgo_topic = rospy.Subscriber(\"/Ego_topic\", EgoVehicleStatus, self.Ego_callback)\n \n self.pubEgo_speed = rospy.Publisher(\"/commands/motor/speed\", Float64, queue_size=10)\n self.pubEgo_angle = rospy.Publisher(\"/commands/servo/position\", Float64, queue_size=10)\n \n self.cmd_speed = 0\n self.cmd_angle = 0.5\n\n #rospack=rospkg.RosPack()\n #pkg_path = rospack.get_path('wecar_ros')\n #full_path = pkg_path+'/scripts/'+'Ego_data.txt'\n #self.f = open(full_path, 'a')\n\n rospy.on_shutdown(self.Ego_shutdown)\n rospy.spin()\n\n def Ego_callback(self, data):\n Ego_header = data.header\n id=data.unique_id\n \n acc_x=data.acceleration.x\n acc_y=data.acceleration.y\n acc_z=data.acceleration.z\n \n pos_x=data.position.x\n pos_y=data.position.y\n pos_z=data.position.z\n\n vel_x=data.velocity.x\n vel_y=data.velocity.y\n vel_z=data.velocity.z\n \n heading=data.heading\n accel=data.accel\n brake=data.brake\n angle=data.wheel_angle\n\n custom_Ego = EgoVehicleStatus()\n custom_Ego.header=data.header\n custom_Ego_speed = Float64()\n custom_Ego_position = Float64()\n \n #print(angle)\n \n #print(\"data len : {}\".format(len(data.ranges)))\n\n #self.f.write(str(lat)+'\\n')\n goal_speed = self.change_speed(accel)\n goal_angle = self.change_position(angle)\n\n custom_Ego_speed.data = goal_speed\n custom_Ego_position.data = goal_angle\n \n #self.pubEgo_speed.publish(custom_Ego_speed)\n #self.pubEgo_angle.publish(custom_Ego_position)\n \n #print(custom_Ego_speed)\n def speed_callback(self, data):\n ego_speed = data.state.speed\n if ego_speed < 2000:\n cmd_speed = 10000\n else:\n cmd_speed = 10000\n Ego_speed_msg = Float64()\n Ego_speed_msg.data = cmd_speed\n \n #print(cmd_speed)\n \n self.pubEgo_speed.publish(Ego_speed_msg)\n \n def angle_callback(self, data):\n ego_angle = data.data\n now_angle = ((ego_angle*2)-1)*19.5\n '''if now_angle < 10:\n cmd_angle = 11\n else:\n cmd_angle = 9'''\n cmd_angle = round(((ego_angle/19.5)+1)/2.5)\n if cmd_angle >= 1:\n cmd_angle = 1\n elif cmd_angle <= -1:\n cmd_angle = -1 \n \n Ego_angle_msg = Float64()\n Ego_angle_msg.data = cmd_angle\n \n #print(cmd_angle)\n \n self.pubEgo_angle.publish(Ego_angle_msg)\n \n \n def Ego_shutdown(self):\n print(\"I'm dead!\")\n #self.f.close()\n custom_Ego_speed=0\n custom_Ego_position=0.5\n self.pubEgo_speed.publish(custom_Ego_speed)\n self.pubEgo_angle.publish(custom_Ego_position)\n \n def change_speed(self, accel):\n accel += 0.1\n \n #print(accel)\n return accel\n \n def change_position(self, angle):\n if angle < 100:\n angle += 1\n else :\n angle -= 1\n\n return angle\n\nif __name__==\"__main__\":\n\n ER = EgoReceiver()\n","repo_name":"haeinO/Virtual_Environment","sub_path":"wecar_ros/scripts/kuad.py","file_name":"kuad.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3958579276","text":"import time, sys, json, os\ndef sprint (s):\n for c in s + '\\n':\n sys.stdout.write (c)\n sys.stdout.flush ()\n 
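# (annotation) In the kuad.py record above, angle_callback converts a
# normalised servo position in [0, 1] to a wheel angle in degrees via
# ((pos * 2) - 1) * 19.5, so 0.5 means straight ahead; note the reverse
# mapping there divides by 2.5 rather than 2, so it is deliberately not an
# exact inverse. A quick check of the forward mapping:
def servo_to_degrees(pos, max_deg=19.5):
    return ((pos * 2) - 1) * max_deg  # [0, 1] -> [-max_deg, +max_deg]

assert servo_to_degrees(0.0) == -19.5
assert servo_to_degrees(0.5) == 0.0
assert servo_to_degrees(1.0) == 19.5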
time.sleep (1. / 15)\nDISCLAIMER =\"This is NOT to solve your problems, It is to practice on robots so you can be ready... Or just to have fun ;)\"\nsprint (\"Hello!\")\ntime.sleep (2)\nsprint (\"DISCLAIMER: \" + DISCLAIMER)\ndef ssim ():\n sprint (\"Do you wish to continue?\")\n startsim = input (\"(lower case!!) y/n \")\n if startsim == \"n\":\n sprint(\"I see how it is...\")\n sprint(\"Ok, ok.. Cya, I guess...\")\n time.sleep(1.5)\n sprint(\"Why would you open this application just to say no?\")\n time.sleep(2)\n sprint(\"Y'know what?\")\n time.sleep(.7)\n sprint(\"I dont care\")\n time.sleep(.7)\n sprint(\"You are gonna do it anyway\")\n open(os.path.join(sys.path[0], \"stage1.py\"), \"r\")\n elif startsim == \"y\":\n sprint (\"Alright!\")\n sprint (\"Starting simulator...\")\n time.sleep(2)\n open(os.path.join(sys.path[0], \"stage1.py\"), \"r\")\n else:\n os.system(\"clear\")\n sprint (\"Please choose y (Yes) or n (No)\")\n ssim()\nssim()\n","repo_name":"Catteleya/therapist-simulator","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74160523464","text":"from faker import Faker\n\n\nclass User:\n def __init__(self, email: str, first_name: str, last_name: str,\n age: int, address: str, gender: str, job: str, has_children_under_16: bool):\n self.email = email\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n self.address = address\n self.gender = gender\n self.job = job\n self.has_children_under_16 = has_children_under_16\n \n def to_json(self) -> dict:\n return self.__dict__\n \n @staticmethod\n def generate_random_user():\n fake = Faker()\n fake_gender = fake.random_element(elements=('F', 'M'))\n fake_age = fake.pyint(min_value=12, max_value=78, step=1)\n fake_first_name = fake.first_name_female() if fake_gender == 'F' else fake.first_name_male()\n fake_last_name = fake.last_name_female() if fake_gender == 'F' else fake.last_name_male()\n return User(\n first_name=fake_first_name,\n last_name=fake_last_name,\n age=fake_age,\n gender=fake_gender,\n email=fake.email(),\n address=fake.address(),\n job=fake.job(),\n has_children_under_16=fake.pybool() if 19 < fake_age < 60 else False\n )","repo_name":"darkwizz/kafka-homework","sub_path":"producer/entities/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32571019825","text":"
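# (annotation) The user.py record above draws gender and age first and then
# derives the remaining Faker fields from those draws, which keeps the
# generated row internally consistent. A condensed sketch of that
# draw-then-derive pattern:
from faker import Faker

fake = Faker()
gender = fake.random_element(elements=("F", "M"))
age = fake.pyint(min_value=12, max_value=78, step=1)
first_name = fake.first_name_female() if gender == "F" else fake.first_name_male()
has_children_under_16 = fake.pybool() if 19 < age < 60 else False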
print(self.alien_points)","repo_name":"Fzx-fire/AlienInvasion","sub_path":"Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9740406184","text":"# python\n# -*- coding: utf-8 -*-\n# Добавил строку из мастрерславля, после которой все заработало.... хер знает, может поможет. Это для кодировок.\nfrom __future__ import unicode_literals\n\nfrom datetime import date, timedelta, datetime\nfrom random import randint\n\nfrom kivy.app import App\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.graphics import *\nfrom kivy.graphics.vertex_instructions import RoundedRectangle\nfrom kivy.properties import ListProperty, NumericProperty\nfrom kivy.storage.dictstore import DictStore\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.uix.behaviors import ButtonBehavior\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.effectwidget import EffectWidget, EffectBase\nfrom kivy.uix.label import Label\nfrom kivy.uix.modalview import ModalView\nfrom kivy.uix.screenmanager import Screen, ScreenManager, FadeTransition\nfrom kivy.uix.widget import Widget\n\n# У МЕНЯ НИХЕРА НЕ ЗАПУСКАЕТСЯ!!!!!!!!\n# from kivy.graphics.vertex_instructions import RoundedRectangle\n\n# Clock.max_iteration = sys.maxint\nClock.max_iteration = 100000\n\nSHADOW_RADIUS = 15.0\n\ndivider = 1\nwidth = 1080.0\nheight = 1704.0\nsideSpacerSiseHint = size_hint = [180 / width, 1]\n\nConfig.set('graphics', 'resizable', 1)\nConfig.set('graphics', 'width', int(width / divider))\nConfig.set('graphics', 'height', int(height / divider))\n\n\n# насколько я понял, эта хрень нужна для теней. Но как она работает полностью я не понимаю.\ndef rounded_rectangle(self, xy, corner_radius, fill=None, outline=None):\n ulpt = xy[0]\n brpt = xy[1]\n self.rectangle([(ulpt[0], ulpt[1] + corner_radius), (brpt[0], brpt[1] - corner_radius)], fill=fill, outline=outline)\n self.rectangle([(ulpt[0] + corner_radius, ulpt[1]), (brpt[0] - corner_radius, brpt[1])], fill=fill, outline=outline)\n self.pieslice([ulpt, (ulpt[0] + corner_radius * 2, ulpt[1] + corner_radius * 2)], 180, 270, fill=fill, outline=outline)\n self.pieslice([(brpt[0] - corner_radius * 2, brpt[1] - corner_radius * 2), brpt], 0, 90, fill=fill, outline=outline)\n self.pieslice([(ulpt[0], brpt[1] - corner_radius * 2), (ulpt[0] + corner_radius * 2, brpt[1])], 90, 180, fill=fill, outline=outline)\n self.pieslice([(brpt[0] - corner_radius * 2, ulpt[1]), (brpt[0], ulpt[1] + corner_radius * 2)], 270, 360, fill=fill, outline=outline)\n\n\n# Это тоже для теней.\nclass RoundedWidget(Widget):\n def __init__(self, **kwargs):\n super(RoundedWidget, self).__init__(**kwargs)\n self.background_color = (1, 1, 1, 0)\n if kwargs.has_key('background_color'):\n background_color = kwargs['background_color']\n else:\n background_color = (1, 1, 1, 0)\n with self.canvas.before:\n Color(rgba=background_color)\n self.rect = RoundedRectangle(pos=self.pos, size=self.size, radius=[20, ])\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n\n# Вот это здесь зачем непонятно.\nclass RoundedFlatButton(ButtonBehavior, RoundedWidget, Label):\n pass\n\n\neffect_drop_shadow = b'''\n#define M_PI 3.1415926535897932384626433832795\n\nvec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords) {{\n vec2 coords2;\n float x, y;\n 
float radius, sampling, surface;\n vec4 tint, shadow;\n coords2 = coords + vec2({offset_x:f}, {offset_y:f}) ;\n radius = {radius:f};\n sampling = {sampling:f};\n tint = vec4({r:f}, {g:f}, {b:f}, {a:f});\n if (color.a >= .99)\n return color;\n surface = (sampling * M_PI * radius * radius) / 2.;\n shadow = vec4(0., 0., 0., 0.);\n for (x = -radius; x < radius; x += sampling)\n for (y = -radius; y < radius; y += sampling)\n if (length(vec2(x, y)) <= radius)\n shadow += texture2D(\n texture,\n vec2(coords2.x + x, coords2.y + y) / resolution\n ).a * tint / surface;\n return color + shadow * (shadow.a - color.a);\n}}\n'''\n\n\nclass DropShadowEffect(EffectBase):\n '''Add DropShadow to the input.'''\n offset = ListProperty([0, 0])\n tint = ListProperty([0, 0, 0, 1])\n radius = NumericProperty(1)\n sampling = NumericProperty(1)\n\n def __init__(self, *args, **kwargs):\n super(DropShadowEffect, self).__init__(*args, **kwargs)\n self.fbind('offset', self.do_glsl)\n self.fbind('tint', self.do_glsl)\n self.fbind('radius', self.do_glsl)\n self.fbind('sampling', self.do_glsl)\n self.do_glsl()\n\n def on_size(self, *args):\n self.do_glsl()\n\n def do_glsl(self, *args):\n self.glsl = effect_drop_shadow.format(\n offset_x=self.offset[0],\n offset_y=self.offset[1],\n radius=self.radius,\n sampling=self.sampling,\n r=self.tint[0],\n g=self.tint[1],\n b=self.tint[2],\n a=self.tint[3],\n )\n\n\nclass RoundedShadowButton(BoxLayout):\n def __init__(self, **kwargs):\n super(RoundedShadowButton, self).__init__(**kwargs)\n if kwargs.has_key('shadow_color'):\n self.shadow_color = kwargs['shadow_color']\n else:\n self.shadow_color = (1, 1, 1, 0)\n self.bind(size=self.update_rect)\n self.effect = EffectWidget(size_hint=[1, 1])\n self.button = RoundedFlatButton(**kwargs)\n self.button.pos = (SHADOW_RADIUS / divider, SHADOW_RADIUS / divider)\n self.button.size_hint = [None, None]\n self.effect.add_widget(self.button)\n self.effect.effects = [DropShadowEffect(radius=SHADOW_RADIUS / divider, tint=[0, 0, 0, 0.8])]\n self.add_widget(self.effect)\n\n def update_rect(self, *args):\n ow, oh = self.size[0], self.size[1]\n self.button.size = (ow - SHADOW_RADIUS * 2 / divider, oh - SHADOW_RADIUS * 2 / divider)\n\n\ndef markup_text(size, color, text, bold=True, font=None):\n if bold:\n bs = '[b]'\n be = '[/b]'\n else:\n bs = ''\n be = ''\n\n if not font:\n font = 'RobotoCondensed-Bold' if bold else 'RobotoCondensed-Regular'\n\n return '[size=' + str(size / divider) + '][color=' + color + '][font=' + font + ']' + bs + text + be + '[/font][/color][/size]'\n\n\nclass NonAlcogolic(App):\n\n def build(self):\n settings = MySettings()\n myScreenmanager = ScreenManager(transition=FadeTransition())\n startScreen = StartScreen(name='StartScreen', settings=settings)\n secondScreen = SecondScreen(name='SecondScreen', settings=settings)\n programScreen = Program(name='ProgramScreen', settings=settings)\n menuScreen = Menu(name='MenuScreen', settings=settings)\n warningOne = WarningOne(name='WarningOne')\n warningTwo = WarningTwo(name='WarningTwo')\n warningThree = WarningThree(name='WarningThree', settings=settings)\n oneRazNotKontrabas = OneRazNotKontrabas(name='OneRazNotKontrabas')\n twoRazIsKontrabas = TwoRazIsKontrabas(name='TwoRazIsKontrabas', settings=settings)\n myScreenmanager.add_widget(startScreen)\n myScreenmanager.add_widget(secondScreen)\n myScreenmanager.add_widget(programScreen)\n myScreenmanager.add_widget(menuScreen)\n myScreenmanager.add_widget(oneRazNotKontrabas)\n myScreenmanager.add_widget(twoRazIsKontrabas)\n 
myScreenmanager.add_widget(warningOne)\n myScreenmanager.add_widget(warningTwo)\n myScreenmanager.add_widget(warningThree)\n if settings.isNotReady:\n myScreenmanager.current = 'StartScreen'\n else:\n myScreenmanager.current = 'ProgramScreen'\n return myScreenmanager\n\n\nclass StartScreen(Screen):\n\n def __init__(self, **kwargs):\n super(StartScreen, self).__init__(**kwargs)\n with self.canvas:\n Color(rgba=[1, 1, 1, 1])\n self.rect = Rectangle(pos=self.pos, size=self.size)\n self.settings = kwargs['settings']\n btnLayout = BoxLayout(orientation='horizontal')\n centerColumnLayout = BoxLayout(orientation='vertical')\n leftSpacer = Widget(size_hint=sideSpacerSiseHint)\n rightSpacer = Widget(size_hint=sideSpacerSiseHint)\n topSpacer = Widget(size_hint=[1, .7])\n bottomSpacer = Widget(size_hint=[1, .065])\n firstBtn = RoundedShadowButton(\n text=markup_text(size=50, color='FFFFFF', text='ПЕРЕСТАТЬ ПИТЬ!'),\n markup=True,\n size_hint=[1, 116 / height],\n background_color=(0x0 / 255.0, 0xd6 / 255.0, 0xd6 / 255.0, 1),\n shadow_color=(0x19, 0xb6, 0xbb, 1),\n # background_normal='',\n on_press=self.changer\n )\n btnLayout.add_widget(leftSpacer)\n centerColumnLayout.add_widget(topSpacer)\n centerColumnLayout.add_widget(firstBtn)\n centerColumnLayout.add_widget(bottomSpacer)\n btnLayout.add_widget(centerColumnLayout)\n btnLayout.add_widget(rightSpacer)\n\n self.add_widget(btnLayout)\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def changer(self, *args):\n self.manager.current = 'SecondScreen'\n\n\nclass SecondScreen(Screen):\n\n def __init__(self, **kwargs):\n super(SecondScreen, self).__init__(**kwargs)\n with self.canvas:\n Color(rgba=[1, 1, 1, 1])\n self.rect = Rectangle(pos=self.pos, size=self.size)\n self.settings = kwargs['settings']\n secondScreenLayout = BoxLayout(orientation='horizontal')\n horizontalBlancLayoutOne = Widget(size_hint=[.25, 1])\n horizontalBlancLayoutTwo = Widget(size_hint=[.25, 1])\n verticalBlancLayoutOne = Widget(size_hint=[1, .2])\n verticalBlancLayoutTwo = Widget(size_hint=[1, .2])\n buttonsLayout = BoxLayout(orientation='vertical', spacing=30, size_hint=[1, 1])\n alignSpacesNum = 17\n oneWeekBtn = Button(\n text=markup_text(size=46, color='5F3C03', text=' ' * alignSpacesNum + \"НА \") + markup_text(size=300, color='5F3C03', text='1') + markup_text(size=46, color='5F3C03', text=' НЕДЕЛЮ'),\n halign='left',\n markup=True,\n size_hint=[1, .25],\n background_color=(1, 1, 1, 1),\n background_normal='',\n on_press=self.changerOneWeek\n )\n oneWeekBtn.bind(size=oneWeekBtn.setter('text_size'))\n oneMonthBtn = Button(\n text=markup_text(size=46, color='74858E', text=' ' * alignSpacesNum + \"НА \") + markup_text(size=300, color='74858E', text='1') + markup_text(size=46, color='74858E', text=' МЕСЯЦ'),\n halign='left',\n markup=True,\n size_hint=[1, .25],\n background_color=(1, 1, 1, 1),\n background_normal='',\n on_press=self.changerOneMonth)\n oneMonthBtn.bind(size=oneMonthBtn.setter('text_size'))\n oneYearBtn = Button(\n text=markup_text(size=46, color='F1BA18', text=' ' * alignSpacesNum + \"НА \") + markup_text(size=300, color='F1BA18', text='1') + markup_text(size=46, color='F1BA18', text=' ГОД'),\n halign='left',\n markup=True,\n size_hint=[1, .25],\n background_color=(1, 1, 1, 1),\n background_normal='',\n on_press=self.changerOneYear)\n oneYearBtn.bind(size=oneYearBtn.setter('text_size'))\n secondScreenLayout.add_widget(horizontalBlancLayoutOne)\n 
buttonsLayout.add_widget(verticalBlancLayoutOne)\n buttonsLayout.add_widget(oneWeekBtn)\n buttonsLayout.add_widget(oneMonthBtn)\n buttonsLayout.add_widget(oneYearBtn)\n buttonsLayout.add_widget(verticalBlancLayoutTwo)\n secondScreenLayout.add_widget(buttonsLayout)\n\n secondScreenLayout.add_widget(horizontalBlancLayoutTwo)\n\n self.add_widget(secondScreenLayout)\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def setDateParameters(self, days):\n self.manager.current = 'ProgramScreen'\n self.settings.startDay = datetime.now()\n self.settings.finalDay = date.today() + timedelta(days)\n\n def changerOneWeek(self, *args):\n self.setDateParameters(7)\n\n def changerOneMonth(self, *args):\n self.setDateParameters(30)\n\n def changerOneYear(self, *args):\n self.setDateParameters(365)\n\n\nclass MySettings(object):\n __finalDay = None\n __startDay = None\n __count = 0\n __isNotReady = True\n __store = None\n __isKontrabas = False\n\n def __init__(self):\n self.__store = DictStore('user.dat')\n if self.__store.store_exists('finalDay') and self.__store.get('finalDay')['data']:\n self.__startDay = self.__store.get('startDay')['data']\n self.__finalDay = self.__store.get('finalDay')['data']\n self.__count = self.__store.get('count')['data']\n self.__isKontrabas = self.__store.get('isKontrabas')['data']\n self.__isNotReady = False\n else:\n self.reset()\n\n def parseDate(self, dateText):\n if dateText:\n return datetime.strptime(dateText, \"%S-%M-%H %d-%m-%Y\")\n else:\n None\n\n def reset(self):\n self.__isNotReady = True\n self.__startDay = None\n self.__finalDay = None\n self.__count = 0\n self.__isKontrabas = False\n self.__store.put('finalDay', data=None)\n self.__store.put('count', data=0)\n self.__store.put('isKontrabas', data=False)\n\n def __getattr__(self, attr):\n if attr == 'isNotReady':\n return self.__isNotReady\n if attr == 'startDay':\n return self.parseDate(self.__startDay)\n if attr == 'finalDay':\n return self.parseDate(self.__finalDay)\n if attr == 'counter':\n return self.__count\n if attr == 'isKontrabas':\n return self.__isKontrabas\n return None\n\n def __setattr__(self, key, value):\n if key == 'startDay':\n if value:\n self.__startDay = value.strftime(\"%S-%M-%H %d-%m-%Y\")\n else:\n self.__startDay = value\n self.__store.put('startDay', data=self.__startDay)\n return\n if key == 'finalDay':\n if value:\n self.__finalDay = value.strftime(\"%S-%M-%H %d-%m-%Y\")\n else:\n self.__finalDay = value\n self.__store.put('finalDay', data=self.__finalDay)\n return\n if key == 'counter':\n self.__count = value\n self.__store.put('count', data=value)\n return\n if key == 'isKontrabas':\n self.__isKontrabas = value\n self.__store.put('isKontrabas', data=value)\n return\n super(MySettings, self).__setattr__(key, value)\n\n\nclass Program(Screen):\n secondsNames = [' СЕКУНДА ', ' СЕКУНДЫ ', ' СЕКУНД ']\n minutesNames = [' МИНУТА ', ' МИНУТЫ ', ' МИНУТ ']\n hoursNames = [' ЧАС ', ' ЧАСА ', ' ЧАСОВ ']\n daysNames = [' ДЕНЬ ', ' ДНЯ ', ' ДНЕЙ ']\n cntNames = [' РАЗ ', ' РАЗА ', ' РАЗ ']\n\n excuses = [\"Панкреатит – врачи запретили\",\n \"А вдруг я будущая мать? 
Мне нельзя\",\n \"Аллергия на алкоголь, меня раздует\",\n \"Закодировался и пока не подобрал код\",\n \"Временно перешел на колеса – от них лучше прет\",\n \"Принял ислам, там запрещено\",\n \"Меня покусала бешеная белка, пока нельзя\",\n \"Сейчас делаю детокс печени, пить нельзя\",\n \"Мне нельзя — я алкоголик\",\n \"Решил стать альфасамцом и тренирую силу воли\",\n \"Перестали в театр пускать пьяным\",\n \"Не буду – пьяным меня тянет кататься на карусели\",\n \"Вступил в секту, а там главные правила - сухой закон и зеленые галстуки\",\n \"Не хочу пьяным возвращаться домой\",\n \"Узнал, что алкоголь это вредно\",\n \"Хочу дожить до пенсии\",\n \"Узнал что в алкоголе много калорий – от него толстеют\",\n \"Перестал понимать трезвых людей\",\n \"Устал от случайных сексуальных связей\",\n \"Нога чешется, когда пью\",\n \"Слишком много хорошего в последнее время\",\n \"Вступил в клуб анонимных алкоголиков, а вы знаете, как меня зовут\",\n \"Надоели зеленые человечки\",\n \"Я слишком любвеобильный, когда пьяный\",\n \"Когда напьюсь, у меня в голове рождаются слишком умные мысли, и окружающие перестают меня понимать\",\n \"Боюсь перекачаться, поднимая стакан\",\n \"В мире слишком много глупости – не время пить\",\n \"Я бы хотел, но не хочу\",\n \"Жена запретила\",\n \"Сегодня свидание, не хочу чтобы она сразу поняла, что я алкоголик\",\n \"Когда выпиваю, начинаю петь на испанском\",\n \"Завтра анализы сдавать\",\n \"Меня прет от трезвого состояния – такое редко бывает\",\n \"Вечером тараканов травить буду\",\n \"Я беременный и мне нельзя\",\n \"Поспорил на 100$ что неделю не буду пить\",\n \"У меня безалкогольная диета\",\n \"Я за рулем\",\n \"Меня покусал бешеный слон и пока нельзя\",\n \"Проиграл в карты, что не буду пить. А карточный долг священен\",\n \"Хочу узнать что такое алкогольная депривация\",\n \"Меня покусал бешеный хомячок и пока нельзя\",\n \"Голоса мне говорят, что пока не стоит\"\n ]\n\n def __init__(self, **kwargs):\n super(Program, self).__init__(**kwargs)\n with self.canvas:\n Color(rgba=[1, 1, 1, 1])\n self.rect = Rectangle(pos=self.pos, size=self.size)\n self.settings = kwargs['settings']\n\n parentLayout = BoxLayout(orientation='vertical')\n self.menuLayout = BoxLayout(orientation='horizontal', size_hint=[1, 200 / height])\n self.blancMenuLayoutWidget = Widget(size_hint=[880 / width, 1])\n menuButton = Button(text=markup_text(size=40, color='000000', text=u'\\ue9bd', font='icomoon'), background_color=(1, 1, 1, 1), background_normal='', markup=True, size_hint=[200 / width, 1], on_press=self.changer)\n self.menuLayout.add_widget(self.blancMenuLayoutWidget)\n self.menuLayout.add_widget(menuButton)\n parentLayout.add_widget(self.menuLayout)\n\n mainLayout = BoxLayout(orientation='horizontal')\n leftSpacer = Widget(size_hint=sideSpacerSiseHint)\n rightSpacer = Widget(size_hint=sideSpacerSiseHint)\n mainLayout.add_widget(leftSpacer)\n\n self.programLayoutWidth = 940.0\n programLayout = BoxLayout(orientation='vertical')\n\n self.cntLabelWidget, self.cntLbl, self.cntTxtLbl = self.getCountWidget(markup_text(size=46, color='92290E', text='ПРЕДЛОЖИЛИ\\nВЫПИТЬ', font='Roboto-Black'))\n goneLabelWidget, self.goneLbl, self.goneTxtLbl = self.getCountWidget(markup_text(size=46, color='75868F', text='ПРОШЛО', font='Roboto-Black'))\n leftLabelWidget, self.leftLbl, self.leftTxtLbl = self.getCountWidget(markup_text(size=46, color='75868F', text='ОСТАЛОСЬ', font='Roboto-Black'))\n\n horisontalUpperButtonSpacer = Widget(size_hint=[1, 144 / height])\n buttonProposal = RoundedShadowButton(\n 
text=markup_text(size=50, color='FFFFFF', text='МНЕ ПРЕДЛОЖИЛИ ВЫПИТЬ'),\n markup=True,\n size_hint=[1, 145 / height],\n background_color=(0x92 / 255.0, 0x29 / 255.0, 0x0e / 255.0, 1), # 92290E\n shadow_color=(0x4E, 0x16, 0x08, 1), # 4E1608\n on_press=self.btnPress\n )\n horisontalBottomButtonSpacer = Widget(size_hint=[1, 135 / height])\n\n programLayout.add_widget(self.cntLabelWidget)\n programLayout.add_widget(goneLabelWidget)\n programLayout.add_widget(leftLabelWidget)\n programLayout.add_widget(horisontalUpperButtonSpacer)\n programLayout.add_widget(buttonProposal)\n programLayout.add_widget(horisontalBottomButtonSpacer)\n mainLayout.add_widget(programLayout)\n mainLayout.add_widget(rightSpacer)\n parentLayout.add_widget(mainLayout)\n self.add_widget(parentLayout)\n Clock.schedule_interval(self.updateLabels, 1)\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def getCountWidget(self, text):\n widgetHeight = 384.0\n mainLayout = BoxLayout(orientation='vertical', size_hint=[1, widgetHeight / height])\n labelOne = Label(text=text, markup=True, size_hint=[1, 122.0 / widgetHeight], halign='left', valign='center')\n labelOne.bind(size=labelOne.setter('text_size'))\n addLayout = BoxLayout(orientation='horizontal', size_hint=[1, 224.0 / widgetHeight])\n counterLabel = Label(text='', markup=True, size_hint=[0.571, 1.39], halign='right', valign='top')\n counterLabel.bind(size=counterLabel.setter('text_size'))\n textLabel = Label(text='', markup=True, size_hint=[0.429, 1], halign='left', valign='bottom')\n textLabel.bind(size=textLabel.setter('text_size'))\n addLayout.add_widget(counterLabel)\n addLayout.add_widget(textLabel)\n horizontalBlankBottomWidget = Widget(size_hint=[1, 50.0 / widgetHeight])\n mainLayout.add_widget(labelOne)\n mainLayout.add_widget(addLayout)\n mainLayout.add_widget(horizontalBlankBottomWidget)\n return mainLayout, counterLabel, textLabel\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def updateLabels(self, *args):\n if self.settings.finalDay:\n currentDate = datetime.now()\n diffGone = currentDate - self.settings.startDay\n diffLeft = self.settings.finalDay - currentDate\n self.cntLbl.text = markup_text(size=300, color='92290E', text=str(self.settings.counter))\n self.cntTxtLbl.text = markup_text(size=46, color='92290E', text=self.cntNames[self.getNumOfVariant(self.settings.counter)], font='Roboto-Black')\n cnt, txt = self.getTextForTimers(diffGone)\n self.goneLbl.text = markup_text(size=300, color='75868F', text=cnt)\n self.goneTxtLbl.text = markup_text(size=46, color='75868F', text=txt, font='Roboto-Black')\n cnt, txt = self.getTextForTimers(diffLeft)\n self.leftLbl.text = markup_text(size=300, color='75868F', text=cnt)\n self.leftTxtLbl.text = markup_text(size=46, color='75868F', text=txt, font='Roboto-Black')\n # self.show_marks1(self.menuLayout)\n # self.show_marks2(self.leftTxtLbl)\n\n def show_marks1(self, widget):\n # Indicate the position of the anchors with a red top marker\n widget.canvas.before.clear()\n with widget.canvas.before:\n Color(1, 0, 0, 0.5)\n Rectangle(pos=widget.pos, size=widget.size)\n\n def show_marks2(self, widget):\n # Indicate the position of the anchors with a red top marker\n widget.canvas.before.clear()\n with widget.canvas.before:\n Color(0, 1, 0, 0.5)\n Rectangle(pos=widget.pos, size=widget.size)\n\n def getNumOfVariant(self, num):\n chk = num % 10\n if chk == 1 and num != 11:\n return 0\n if 1 < chk < 5 and not [12, 13, 14].__contains__(num):\n return 1\n return 2\n\n def 
getTextForTimers(self, diff):\n if diff.days <= 0:\n if diff.seconds < 60:\n return str(diff.seconds), self.secondsNames[self.getNumOfVariant(diff.seconds)]\n else:\n diffMinutes = diff.seconds / 60\n if diffMinutes < 60:\n return str(diffMinutes), self.minutesNames[self.getNumOfVariant(diffMinutes)]\n else:\n diffHours = diffMinutes / 60\n return str(diffHours), self.hoursNames[self.getNumOfVariant(diffHours)]\n\n else:\n return str(diff.days), self.daysNames[self.getNumOfVariant(diff.days)]\n\n def changer(self, *args):\n self.manager.current = 'MenuScreen'\n\n def btnPress(self, *args):\n self.settings.counter += 1\n # всплывает попап с отмазкой\n excuse = self.excuses[randint(0, len(self.excuses) - 1)]\n # textLabel = Label(text=markup_text(size=80, color='000000', text=exсuse, bold=False), markup=True, size_hint=(0.8, 0.8), valign='top')\n # textLabel.bind(size=textLabel.setter('text_size'))\n # popup = ModalView(title=\"ОТМАЗКА НА СЕГОДНЯ\",\n # title_color=(0x75 / 255.0, 0x86 / 255.0, 0x8F / 255.0, 1), # 75868F\n # title_size=46 / divider,\n # #background='white',\n # background_color=(1, 1, 1, 0),\n # separator_color=(1, 1, 1, 1),\n # content=textLabel,\n # size_hint=(.7, .5))\n\n popup = ModalView(size_hint=[0.8, 0.6])\n effectWidget = EffectWidget(size_hint=[1.2, 1.2])\n effectLayout = AnchorLayout(anchor_x='center', anchor_y='center', size_hint=[1, 1])\n popupWidget = RoundedWidget(size_hint=[0.9, 0.9], background_color=(1, 1, 1, 1), shadow_color=(70, 70, 70, 1))\n widgetLayout = BoxLayout(orientation='vertical')\n\n def popupUpdate(instance, *args):\n x, y = instance.size\n widgetLayout.size = (x - 100, y - 100)\n w, h = instance.pos\n widgetLayout.pos = (w + 50, h + 50)\n\n popupWidget.bind(size=popupUpdate, pos=popupUpdate) # popupButton.setter('text_size'))\n captionLabel = Label(text=markup_text(size=46, color='75868F', text='ОТМАЗКА НА СЕГОДНЯ', font='Roboto-Black'), markup=True, size_hint=(1, 0.35), valign='top', halign='left')\n captionLabel.bind(size=captionLabel.setter('text_size'))\n textLabel = Button(text=markup_text(size=80, color='000000', text=excuse, bold=False), markup=True, size_hint=(1, 0.65), valign='top', halign='left', background_color=(0, 0, 0, 0), on_press=popup.dismiss)\n textLabel.bind(size=textLabel.setter('text_size'))\n widgetLayout.add_widget(captionLabel)\n widgetLayout.add_widget(textLabel)\n popupWidget.add_widget(widgetLayout)\n effectLayout.add_widget(popupWidget)\n effectWidget.add_widget(effectLayout)\n effectWidget.effects = [DropShadowEffect(radius=SHADOW_RADIUS / divider, tint=[0, 0, 0, 0.7])]\n popup.add_widget(effectWidget)\n popup.background_color = (0.2, 0.2, 0.2, 0.6)\n\n popup.open()\n\n\nclass Menu(Screen):\n\n def __init__(self, **kwargs):\n super(Menu, self).__init__(**kwargs)\n with self.canvas:\n Color(rgba=[1, 1, 1, 1])\n self.rect = Rectangle(pos=self.pos, size=self.size)\n self.settings = kwargs['settings']\n topBlankWidget = Widget(size_hint=[1, .1])\n bottomBlankWidget = Widget(size_hint=[1, .1])\n buttonsLayout = BoxLayout(orientation='vertical', spacing=30, size_hint=[1, 1])\n oneWeekBtn = Button(\n text=markup_text(size=80, color='92290E', text=\"ПЕРЕСТАТЬ НЕ ПИТЬ\", font='Roboto-Black'),\n halign='center',\n valign='center',\n markup=True,\n size_hint=[1, .3],\n background_color=(1, 1, 1, 1),\n background_normal='',\n on_press=self.changerWarningOneScr\n )\n oneWeekBtn.bind(size=oneWeekBtn.setter('text_size'))\n if not self.settings.isKontrabas:\n colorForIDrink = 'F1BA18'\n else:\n colorForIDrink = '92290E'\n 
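# Note on getNumOfVariant above: it picks the Russian plural-form index for a
# number (0 = singular, 1 = the paucal "2-4" form, 2 = plural). A quick sketch
# of the rule with illustrative values (not taken from the app):
#
#   getNumOfVariant(1)   # -> 0, "1 ДЕНЬ"
#   getNumOfVariant(3)   # -> 1, "3 ДНЯ"
#   getNumOfVariant(11)  # -> 2, "11 ДНЕЙ" (teens are always plural)
#   getNumOfVariant(21)  # -> 0, past the teens only the last digit matters
#
# Caveat: because the teen check lists 11-14 literally, 111-114 would get the
# wrong form; testing num % 100 instead of num would cover those too.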
self.iDrinkBtn = Button(\n text=markup_text(size=80, color=colorForIDrink, text=\"Я ВЫПИЛ\", font='Roboto-Black'),\n halign='center',\n valign='center',\n markup=True,\n size_hint=[1, .3],\n background_color=(1, 1, 1, 1),\n background_normal='',\n on_press=self.changer)\n self.iDrinkBtn.bind(size=self.iDrinkBtn.setter('text_size'))\n oneYearBtn = Button(\n text=markup_text(size=80, color='74858E', text=\"ЗАКРЫТЬ МЕНЮ\", font='Roboto-Black'),\n halign='center',\n valign='center',\n markup=True,\n size_hint=[1, .3],\n background_color=(1, 1, 1, 1),\n background_normal='',\n on_press=self.closeMenu)\n oneYearBtn.bind(size=oneYearBtn.setter('text_size'))\n buttonsLayout.add_widget(topBlankWidget)\n buttonsLayout.add_widget(oneWeekBtn)\n buttonsLayout.add_widget(self.iDrinkBtn)\n buttonsLayout.add_widget(oneYearBtn)\n buttonsLayout.add_widget(bottomBlankWidget)\n self.add_widget(buttonsLayout)\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def on_enter(self, *args):\n if not self.settings.isKontrabas:\n colorForIDrink = 'F1BA18'\n else:\n colorForIDrink = '92290E'\n self.iDrinkBtn.text = markup_text(size=80, color=colorForIDrink, text=\"Я ВЫПИЛ\", font='Roboto-Black')\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def closeMenu(self, *args):\n self.manager.current = 'ProgramScreen'\n\n def changer(self, *args):\n self.settings.startDay = datetime.now()\n if not self.settings.isKontrabas:\n self.settings.isKontrabas = True\n self.manager.current = 'OneRazNotKontrabas'\n else:\n self.manager.current = 'TwoRazIsKontrabas'\n\n def changerWarningOneScr(self, *args):\n self.manager.current = 'WarningOne'\n\n\ndef buildWarningForm(textLbl, textBtn1, eventBtn1, textBtn2, eventBtn2):\n buttonSizeHint = [1, 100 / height]\n mainLayout = BoxLayout(orientation='horizontal')\n leftSpacer = Widget(size_hint=sideSpacerSiseHint)\n rightSpacer = Widget(size_hint=sideSpacerSiseHint)\n mainLayout.add_widget(leftSpacer)\n warninLayout = BoxLayout(orientation='vertical', spacing=50)\n lblLayout = AnchorLayout(size_hint_y=0.5)\n warninLbl = Label(text=markup_text(size=80, color='92290E', text=textLbl, bold=False, font='Roboto-Black'),\n markup=True,\n size_hint_x=0.8,\n haligh='center',\n valign='center')\n warninLbl.bind(size=warninLbl.setter('text_size'))\n lblLayout.add_widget(warninLbl)\n warninLayout.add_widget(lblLayout)\n if not textBtn1 or not textBtn2:\n bottomBlankWidget1 = Widget(size_hint=buttonSizeHint)\n warninLayout.add_widget(bottomBlankWidget1)\n if textBtn1:\n button1 = RoundedShadowButton(\n text=markup_text(size=50, color='FFFFFF', text=textBtn1),\n markup=True,\n size_hint=buttonSizeHint,\n background_color=(0x92 / 255.0, 0x29 / 255.0, 0x0e / 255.0, 1), # 92290E\n shadow_color=(0x4E, 0x16, 0x08, 1), # 4E1608\n on_press=eventBtn1\n )\n warninLayout.add_widget(button1)\n if textBtn2:\n button2 = RoundedShadowButton(\n text=markup_text(size=50, color='FFFFFF', text=textBtn2),\n markup=True,\n size_hint=buttonSizeHint,\n background_color=(0x0 / 255.0, 0xd6 / 255.0, 0xd6 / 255.0, 1),\n shadow_color=(0x19, 0xb6, 0xbb, 1),\n on_press=eventBtn2\n )\n warninLayout.add_widget(button2)\n\n bottomBlankWidget2 = Widget(size_hint_x=1, size_hint_y=0.035)\n warninLayout.add_widget(bottomBlankWidget2)\n\n mainLayout.add_widget(warninLayout)\n mainLayout.add_widget(rightSpacer)\n return mainLayout\n\n\nclass OneRazNotKontrabas(Screen):\n def __init__(self, **kwargs):\n super(OneRazNotKontrabas, self).__init__(**kwargs)\n with self.canvas:\n 
Color(rgba=[1, 1, 1, 1])\n self.rect = Rectangle(pos=self.pos, size=self.size)\n warninLayout = buildWarningForm('ЭХ... НУ ЧТОЖ ТЫ...\\nОДИН РАЗ НЕ КОНТРАБАС, ХОТЯ САМ ПОНИМАЕШЬ...', None, None, \"OK, НЕ БУДУ БОЛЬШЕ ПИТЬ\", self.changerCancel)\n self.add_widget(warninLayout)\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def changerCancel(self, *args):\n self.manager.current = 'ProgramScreen'\n\n\nclass TwoRazIsKontrabas(Screen):\n def __init__(self, **kwargs):\n super(TwoRazIsKontrabas, self).__init__(**kwargs)\n with self.canvas:\n Color(rgba=[4, 1, 1, 1])\n self.rect = Rectangle(pos=self.pos, size=self.size)\n self.settings = kwargs['settings']\n warninLayout = buildWarningForm('ЭТА ПРОГРАММА ДЛЯ МУЖИКОВ, А НЕ ДЛЯ ТЕБЯ!', \"Я НЕ МУЖИК\", self.changerNext, None, None)\n self.add_widget(warninLayout)\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def changerNext(self, *args):\n self.settings.reset()\n self.manager.current = 'StartScreen'\n\n\nclass WarningOne(Screen):\n def __init__(self, **kwargs):\n super(WarningOne, self).__init__(**kwargs)\n with self.canvas:\n Color(rgba=[1, 1, 1, 1])\n self.rect = Rectangle(pos=self.pos, size=self.size)\n warninLayout = buildWarningForm('ТЫ ЖЕ ОБЕЩАЛ НЕ ПИТЬ! МУЖИК ВСЕГДА ДЕРЖИТ СЛОВО. ТЫ ЧТО, НЕ МУЖИК?', \"Я НЕ МУЖИК\", self.changerNext, \"ЛАДНО, НЕ БУДУ ПИТЬ\", self.changerCancel)\n self.add_widget(warninLayout)\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def changerNext(self, *args):\n self.manager.current = 'WarningTwo'\n\n def changerCancel(self, *args):\n self.manager.current = 'ProgramScreen'\n\n\nclass WarningTwo(Screen):\n def __init__(self, **kwargs):\n super(WarningTwo, self).__init__(**kwargs)\n with self.canvas:\n Color(rgba=[1, 1, 1, 1])\n self.rect = Rectangle(pos=self.pos, size=self.size)\n warninLayout = buildWarningForm('КАК ТЫ ПОТОМ БУДЕШЬ СМОТРЕТЬ В ГЛАЗА СВОИМ ДРУЗЬЯМ, КОТОРЫЕ ВЕРИЛИ ТЕБЕ?', \"НИКАК, Я ДЕРЬМО\", self.changerNext, \"ОК, Я ПЕРЕДУМАЛ. НЕ БУДУ ПИТЬ\", self.changerCancel)\n self.add_widget(warninLayout)\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def changerNext(self, *args):\n self.manager.current = 'WarningThree'\n\n def changerCancel(self, *args):\n self.manager.current = 'ProgramScreen'\n\n\nclass WarningThree(Screen):\n def __init__(self, **kwargs):\n super(WarningThree, self).__init__(**kwargs)\n with self.canvas:\n Color(rgba=[1, 1, 1, 1])\n self.rect = Rectangle(pos=self.pos, size=self.size)\n self.settings = kwargs['settings']\n warninLayout = buildWarningForm('И ПОСЛЕ ЭТОГО ТЫ СЧИТАЕШЬ СЕБЯ АЛЬФА САМЦОМ?', \"НЕТ, Я ЛОХ\", self.changerNext, \"НЕ БУДУ ТАК БОЛЬШЕ! 
ИЗВИНИТЕ!\", self.changerCancel)\n self.add_widget(warninLayout)\n self.bind(pos=self.update_rect, size=self.update_rect)\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def changerNext(self, *args):\n self.settings.reset()\n self.manager.current = 'StartScreen'\n\n def changerCancel(self, *args):\n self.manager.current = 'ProgramScreen'\n\n\nif __name__ == \"__main__\":\n NonAlcogolic().run()\n","repo_name":"med179/nonAlcogolic","sub_path":"nonAlcogolic.py","file_name":"nonAlcogolic.py","file_ext":"py","file_size_in_byte":37917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34479523636","text":"import random\n\nfrequency1 = 0\nfrequency2 = 0\nfrequency3 = 0\nfrequency4 = 0\nfrequency5 = 0\nfrequency6 = 0\n\nfor roll in range(6_000_000):\n face = random.randrange(1, 7)\n if face == 1:\n frequency1 += 1\n elif face == 2:\n frequency2 += 1\n elif face == 3:\n frequency3 += 1\n elif frequency4 == 4:\n frequency4 += 1\n elif frequency5 == 5:\n frequency5 += 1\n elif frequency6 == 6:\n frequency6 += 1\nprint(f'Face{\"Frequency\" :>13}')\nprint(f'{1:>3}{frequency1:>13}')\nprint(f'{2:>3}{frequency2:>13}')\nprint(f'{3:>3}{frequency3:>13}')\nprint(f'{4:>3}{frequency4:>13}')\nprint(f'{5:>3}{frequency5:>13}')\nprint(f'{6:>3}{frequency6:>13}')\n","repo_name":"everybees/python_with_cohorts","sub_path":"nine/judith/chapter_three/seperating_digit_in_an_integer_.py","file_name":"seperating_digit_in_an_integer_.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21245579227","text":"import argparse\nimport glob\nimport os\nfrom datetime import datetime\n\nfrom lxml import etree\n\nfrom corpustools import argparse_version, corpuspath, util\n\n\ndef parse_options():\n \"\"\"Parse the options for this script.\"\"\"\n parser = argparse.ArgumentParser(\n parents=[argparse_version.parser],\n description=\"Turn cesDoc xml into our homegrown xml.\",\n )\n\n parser.add_argument(\"lang\", help=\"Language of the file\")\n parser.add_argument(\"testament\", choices=[\"ot\", \"nt\"], help=\"Old or new testament\")\n parser.add_argument(\"cesdoc\", help=\"The cesdoc that should be converted\")\n\n return parser.parse_args()\n\n\ndef main():\n \"\"\"Turn cesDoc to homegrown xml.\"\"\"\n args = parse_options()\n tree = etree.parse(args.cesdoc)\n\n chapter_paths = (\n save_chapter(\n args.lang,\n args.testament,\n f\"{bookindex:0>2}_{chapterindex:0>3}\",\n get_verses(chapter),\n os.path.basename(args.cesdoc),\n )\n for (bookindex, book) in enumerate(tree.xpath(\".//div[@type='book']\"), start=1)\n for (chapterindex, chapter) in enumerate(\n book.xpath(\".//div[@type='chapter']\"), start=1\n )\n )\n\n set_parallels(chapter_paths, args.testament, args.lang)\n\n\ndef get_verses(chapter):\n \"\"\"Extract the chapter content.\"\"\"\n body = etree.Element(\"body\")\n for seg in chapter.iter(\"seg\"):\n verse = etree.SubElement(body, \"verse\")\n verse.set(\"number\", seg.get(\"id\").split(\".\")[-1])\n verse.text = seg.text.strip()\n\n return body\n\n\ndef save_chapter(language, testament, filename, body, address):\n \"\"\"Save chapter info.\"\"\"\n language_year = {\"nob\": 2011, \"sme\": 2019.0}\n name = os.path.join(\n os.getenv(\"GTBOUND\"),\n \"orig\",\n language,\n \"bible\",\n testament,\n \"bibel.no\",\n f\"{filename}.xml\",\n )\n with util.ignored(OSError):\n os.makedirs(os.path.dirname(name))\n\n path = 
corpuspath.make_corpus_path(name)\n path.metadata.set_variable(\"filename\", address)\n path.metadata.set_variable(\"mainlang\", language)\n path.metadata.set_variable(\"genre\", \"bible\")\n path.metadata.set_variable(\"monolingual\", \"1\")\n path.metadata.set_variable(\"license_type\", \"standard\")\n path.metadata.set_variable(\"year\", language_year.get(language, datetime.now().year))\n\n path.metadata.write_file()\n root = etree.Element(\"document\")\n root.append(body)\n\n with open(name, \"wb\") as page_stream:\n page_stream.write(etree.tostring(root, encoding=\"utf8\", pretty_print=True))\n\n return path\n\n\ndef set_parallels(chapter_paths, testament, new_lang):\n \"\"\"Set the parallels.\n\n Use the nob names as the base, it has all the books and chapters.\n \"\"\"\n nob_names = sorted(\n glob.glob(\n f'{os.path.join(os.getenv(\"GTBOUND\"), \"orig/nob/bible\", testament, \"bibel.no\")}/*.xml'\n )\n )\n for (chapter_path, nob_name) in zip(chapter_paths, nob_names):\n nob_path = corpuspath.make_corpus_path(nob_name)\n nob_meta = nob_path.metadata\n chapter_meta = chapter_path.metadata\n\n chapter_meta.set_parallel_text(\"nob\", os.path.basename(nob_name))\n nob_meta.set_parallel_text(new_lang, os.path.basename(chapter_path.orig))\n nob_meta.write_file()\n\n for (lang, filename) in nob_meta.get_parallel_texts().items():\n chapter_meta.set_parallel_text(lang, filename)\n parallel_path = corpuspath.make_corpus_path(nob_path.parallel(lang))\n parallel_path.metadata.set_parallel_text(\n new_lang, os.path.basename(chapter_path.orig)\n )\n parallel_path.metadata.write_file()\n\n chapter_meta.write_file()\n","repo_name":"giellalt/CorpusTools","sub_path":"corpustools/ces2homegrown.py","file_name":"ces2homegrown.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"15636872564","text":"# model\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\n\n\n# dataset and transformation\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport os\n\n# display images\nfrom torchvision import utils\nimport matplotlib.pyplot as plt\n\n# utils\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom torchsummary import summary\n\n#train\nfrom torch import optim\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n\n\nfrom train import *\n\nfrom dataset import *\n\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='parameters')\nparser.add_argument('--model', type=str, default=\"xeception\", help='select model')\nparser.add_argument('--img_size', type=int, default=640, help='img_size')\nparser.add_argument('--epoch', type=int, default=200, help='epoch')\nparser.add_argument('--batch_size', type=int, default=32, help='batch_size')\nparser.add_argument('--test_ratio', type=float, default=0.2, help='test_ratio')\nparser.add_argument('--img_dir', type=str, default='./data/images2', help='img_dri')\nparser.add_argument('--csv_path', type=str, default='dataset.csv', help='csv_path')\nparser.add_argument('--val_csv_path', type=str, default=None, help='val_csv_path')\nparser.add_argument('--train_name', type=str, default=\"train_\", help='train name')\nparser.add_argument('--weight', type=str, default=\"None\", help='pretrained_weight_path')\nparser.add_argument('--loss', type=str, default=\"multi\", help='multi or 
softmax')\n\n\nargs = parser.parse_args()\nSELECTED_MODEL = args.model\nIMG_SIZE = args.img_size\nEPOCH = args.epoch\nBATCH_SIZE = args.batch_size\nTRAIN_RATIO = args.test_ratio\nIMG_DIR = args.img_dir\nCSV_PATH = args.csv_path\nVAL_PATH = args.val_csv_path\nSAVE_FOLDER_NAME = args.train_name #folder name - > models/train_0/ save weights and result\nLOSS_MODE = args.loss\nweight_path = args.weight\n\ndef create_directory():\n    i = 1\n    while True:\n        dir_name = os.path.join('models/'+SAVE_FOLDER_NAME+ str(i) +'/')\n        if not os.path.exists(dir_name):\n            os.makedirs(dir_name)\n            os.makedirs(dir_name+'/result')\n            return dir_name\n        i += 1\n\nsave_path = create_directory()\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint(device)\n\ntorch.cuda.reset_max_memory_allocated(device=None)\ntorch.cuda.empty_cache()\n\n\n# define transformation\ntransformation = transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Resize(IMG_SIZE)\n])\n\n\ntrain_df, val_df, NUM_CLS, cls_list = get_data_from_csv(csv_path=CSV_PATH,img_dir=IMG_DIR, train_ratio=TRAIN_RATIO, randoms_state=42, val_csv_path=VAL_PATH)\n\n\ntrain_set = CustomDataset(train_df,num_classes=NUM_CLS, image_dir=IMG_DIR, class_list= cls_list ,img_resize=True, img_dsize=(IMG_SIZE,IMG_SIZE))\ntrain_set.transforms = transformation\n\nval_set = CustomDataset(val_df,num_classes=NUM_CLS, image_dir=IMG_DIR, class_list= cls_list, img_resize=True, img_dsize=(IMG_SIZE,IMG_SIZE))\nval_set.transforms = transformation\n\n#################################################declare the model!\nif SELECTED_MODEL == 'xeception':\n    from xeception import *\n    model = Xception(num_classes=NUM_CLS)\n\nelif SELECTED_MODEL == 'googlenetv4':\n    from googlenetv4 import *\n    model = InceptionV4(num_classes=NUM_CLS)\n\nelif SELECTED_MODEL == 'visionT':\n    from ViT import ViT\n    model = ViT(num_classes=NUM_CLS)\n    \nelse:\n    print('select model in list - xeception , googlenetv4 , visionT')\n\nprint(f'train with {SELECTED_MODEL}')\n#######################################for resuming training from saved weights.\nif weight_path != \"None\":\n    model.load_state_dict(torch.load(weight_path, map_location=device))\n\n########################count GPUs.. set up data-parallelism automatically... etc..#
\nnum_device = torch.cuda.device_count()\ndevice_idx = []\nfor i in range(num_device):\n    if torch.cuda.get_device_name(i) == \"NVIDIA DGX Display\":\n        print(f\"Device is not using : {torch.cuda.get_device_name(i)}\")\n    else:\n        device_idx.append(i)\n\nif torch.cuda.device_count() > 1:\n    print(\"Let's use\",num_device, \"GPUs!\")\n    if torch.cuda.device_count() > 4: #for GCT\n        model=model.to('cuda:0')\n        model = nn.DataParallel(model, device_ids=device_idx)\n    else:\n        model = model.to(device=device)\n        model = nn.DataParallel(model)\nelse:\n    model = model.to(device=device)\n\n############################do not pass num_workers on Windows\nif os.name == \"nt\":\n    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE)\n    val_loader = DataLoader(val_set, batch_size=int(BATCH_SIZE//num_device))\nelse:\n    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, num_workers=4*num_device)\n    val_loader = DataLoader(val_set, batch_size=int(BATCH_SIZE//num_device), num_workers=4)\n\n\n# define loss function, optimizer, lr_scheduler\nif LOSS_MODE == 'multi':\n    loss_func = nn.MultiLabelSoftMarginLoss()\nelif LOSS_MODE == 'softmax':\n    loss_func = nn.CrossEntropyLoss()\nopt = optim.Adam(model.parameters(), lr=0.001)\nlr_scheduler = ReduceLROnPlateau(opt, mode='min', factor=0.1, patience=50)\n\nparams_train = {\n    'num_epochs':EPOCH,\n    'optimizer':opt,\n    'loss_func':loss_func,\n    'train_dl':train_loader,\n    'val_dl':val_loader,\n    'sanity_check':False,\n    'lr_scheduler':lr_scheduler,\n    'path2weights':save_path,\n    'loss_mode' : LOSS_MODE,\n}\n\nsummary(model, (3, IMG_SIZE, IMG_SIZE), device=device.type)\n\ntraind_model, loss_hist, metric_hist, metric_cls_hist = train_val(model, device, params_train)\n\n","repo_name":"sidsid84-kor/xeception","sub_path":"start_train.py","file_name":"start_train.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7909035185","text":"from os import sep\nfrom types import LambdaType\nfrom matplotlib.ticker import Formatter\nimport matplotlib.ticker as mtick\nimport pandas as pd\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom matplotlib import pyplot\nimport seaborn as sns\nimport matplotlib.dates as mdates\nfrom load_meta import *\nfrom input import *\n\ndate_fmt = mdates.DateFormatter('%H:%M:%S')\nsns.set()\n#################################################################\npattern = \"%Y-%m-%d %H:%M:%S\"\nmetric_path=\"/home/jfr/Thesis/prom-metrics/\"\nthroughput_sum_file = metric_path + run + \"/storm_topology_Windowbolt_Throughput_sum.csv\"\nthroughput_file = metric_path + run + \"/storm_topology_Windowbolt_Throughput.csv\"\ncpu_file = metric_path + run + \"/node_cpu_seconds_total_1m.csv\"\n\nthroughput_sum = pd.read_csv(throughput_sum_file, sep=\",\")\nthroughput_sum [\"throughput\"]=throughput_sum[\"value_1\"]\nthroughput_sum['time'] = throughput_sum['time'].apply(lambda ts: datetime.strptime(ts, pattern))\n\nthroughput = pd.read_csv(throughput_file, sep= \",\")\nthroughput['time'] = throughput['time'].apply(lambda ts: datetime.strptime(ts, pattern))\n\ncpu_util = pd.read_csv(cpu_file, sep=\",\")\ncpu_util['time'] = cpu_util['time'].apply(lambda ts: datetime.strptime(ts, pattern))\n\nfig, axes = pyplot.subplots( figsize = (15,5))\n#axes.xaxis.set_major_formatter(date_fmt)\npyplot.subplot(121)\nannotate()\ntp_ax = sns.lineplot(x = \"time\", y = \"throughput\", data = throughput_sum[throughput_sum.time > start_check], label = 
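# Note on the training script above: ReduceLROnPlateau only acts when it is
# stepped with the monitored metric once per epoch, so train_val is assumed to
# drive it roughly like this (sketch only; the helper names are placeholders,
# not the real API of train.py):
#
#   for epoch in range(params_train['num_epochs']):
#       train_one_epoch(model, train_loader, loss_func, opt)   # hypothetical
#       val_loss = evaluate(model, val_loader, loss_func)      # hypothetical
#       lr_scheduler.step(val_loss)  # cuts lr by factor=0.1 after 50 flat epochs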
\"SUM\")\ntp_ax.set_ylabel(\"Throughput\")\ntp_ax.get_xaxis().set_major_formatter(date_fmt)\ntp_ax.set_xticks([start_check, start_benchmark, fault_end, fault_end + timedelta(minutes=0.5)])\ntp_ax.set_xticklabels([\"t_0\", \"t_1 t_2\", \"t_3\", \"\"])\n#tp_ax.set_xlim(left = start_check)\ni = 0\nthroughput_check = throughput[(throughput.time > start_check) & (throughput.time < start_benchmark)]\nthroughput_benchmark = throughput[throughput.time >= start_benchmark]\nfor col in throughput.columns[2:]:\n i += 1\n sns.lineplot(x=\"time\", y=col, data=throughput[throughput.time > start_check], label = \"Component \" + str(i))\n partition_check = throughput_check[col][throughput_check[col] > 0]\n partition_benchmark = throughput_benchmark[col][throughput_benchmark[col] > 0]\n print(col\n ,partition_check.mean()\n ,partition_benchmark.mean()\n ,partition_check.max()\n ,partition_benchmark.max()\n ,partition_check.min()\n ,partition_benchmark.min()\n ,partition_check.median()\n ,partition_benchmark.median()\n ,sep = \" & \")\n\nthroughput_melted = throughput.melt( id_vars = [\"time\", \"id\"] , var_name = \"Component\" , value_name = \"Throughput\")\nthroughput_check = throughput_melted[(throughput_melted.time > start_check) & (throughput_melted.time < start_benchmark) & (throughput_melted.Throughput > 0)]\nthroughput_benchmark = throughput_melted[(throughput_melted.time >= start_benchmark) & (throughput_melted.Throughput > 0)]\n\nprint(\"Total\"\n ,throughput_check.mean()[\"Throughput\"]\n ,throughput_benchmark.mean()[\"Throughput\"]\n ,throughput_check.max()[\"Throughput\"]\n ,throughput_benchmark.max()[\"Throughput\"]\n ,throughput_check.min()[\"Throughput\"]\n ,throughput_benchmark.min()[\"Throughput\"]\n ,throughput_check.median()[\"Throughput\"]\n ,throughput_benchmark.median()[\"Throughput\"]\n ,sep = \" & \"\n )\n\npyplot.subplot(122)\n#annotate\nannotate()\ncpu_util = cpu_util[cpu_util.time > start_check]\nax = sns.lineplot(x=\"time\", y=\"value_1\", data=cpu_util, label=\"Storm 1\")\nax.yaxis.set_major_formatter(mtick.FuncFormatter(lambda y, _: '{:.0%}'.format(y))) \nax.set_ylabel(\"CPU Utilization\")\nax.get_xaxis().set_major_formatter(date_fmt)\nax.set_xticks([start_check, start_benchmark, fault_end, fault_end + timedelta(minutes=0.5)])\nax.set_xticklabels([\"t_0\", \"t_1 t_2\", \"t_3\", \"\"])\nsns.lineplot(x=\"time\", y=\"value_2\", data=cpu_util, label=\"Storm 2\")\nsns.lineplot(x=\"time\", y=\"value_5\", data=cpu_util, label=\"Storm 3\")\nsns.lineplot(x=\"time\", y=\"value_3\", data=cpu_util, label=\"Kafka\")\nsns.lineplot(x=\"time\", y=\"value_4\", data=cpu_util, label=\"Utilities\")\n\n\npyplot.savefig(\"outputs/\"+run + \"/\"+ run + \"_metrics.pdf\")\n#pyplot.show()\n\n","repo_name":"jfr2102/thesis-figures","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72344510985","text":"# -*- coding: utf-8 -*-\n# https://docs.python.org/2/library/unittest.html\nfrom __future__ import unicode_literals, division, print_function, absolute_import\nimport os\nfrom unittest import (\n TestLoader as BaseTestLoader,\n TextTestRunner as BaseTestRunner,\n TextTestResult as BaseTestResult,\n TestSuite as BaseTestSuite,\n TestCase\n)\nfrom unittest.main import TestProgram as BaseTestProgram\nimport time\nimport logging\nimport argparse\nimport sys\nimport platform\nimport warnings\n\nfrom .compat import *\nfrom .utils import testpath, classpath, chain, 
loghandler_members\nfrom .environ import TestEnviron\nfrom .path import PathGuesser, PathFinder, RerunFile\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestSuite(BaseTestSuite):\n \"\"\"\n We override the suite so classes that begin with an underscore will be filtered\n out from running, this allows us to easily create base TestCase instances and\n not worry about them being run\n\n https://github.com/python/cpython/blob/3.7/Lib/unittest/suite.py\n \"\"\"\n def addTest(self, test):\n \"\"\"This will filter out \"private\" classes that begin with an underscore\"\"\"\n add_it = True\n if isinstance(test, TestCase):\n add_it = not test.__class__.__name__.startswith(\"_\")\n\n if add_it:\n super(TestSuite, self).addTest(test)\n\n def __str__(self):\n lines = []\n for test in self._tests:\n if isinstance(test, type(self)):\n lines.append(str(test))\n else:\n lines.append(testpath(test))\n\n return \"\\n\".join(lines)\n\n\nclass TestLoader(BaseTestLoader):\n \"\"\"\n This custom loader acts as the translation layer from the cli to our path\n guessing and finding classes\n\n https://docs.python.org/2/library/unittest.html#unittest.TestLoader\n https://github.com/python/cpython/blob/3.7/Lib/unittest/loader.py\n https://github.com/python/cpython/blob/2.7/Lib/unittest/loader.py\n \"\"\"\n suiteClass = TestSuite\n\n def loadTestsFromName(self, name, *args, **kwargs):\n ts = self.suiteClass()\n environ = TestEnviron.get_instance()\n ti = PathGuesser(\n name,\n basedir=self._top_level_dir,\n method_prefix=self.testMethodPrefix\n )\n found = False\n logger.debug(\"Searching for tests in directory: {}\".format(ti.basedir))\n for i, tc in enumerate(ti.possible, 1):\n logger.debug(\"{}. Searching for tests matching: {}\".format(i, tc))\n if tc.has_method():\n for c, mn in tc.method_names():\n logger.debug('Found method test: {}'.format(testpath(c, mn)))\n found = True\n ts.addTest(c(mn))\n environ.counter[\"methods\"] += 1\n\n elif tc.has_class():\n for c in tc.classes():\n logger.debug('Found class test: {}'.format(classpath(c)))\n found = True\n ts.addTest(self.loadTestsFromTestCase(c))\n environ.counter[\"classes\"] += 1\n\n else:\n for m in tc.modules():\n logger.debug('Found module test: {}'.format(m.__name__))\n found = True\n ts.addTest(self.loadTestsFromModule(m))\n environ.counter[\"modules\"] += 1\n\n # if we found a module that matched then don't try for method\n if found: break\n\n if not found:\n ti.raise_any_error()\n\n logger.debug(\"Found {} total tests\".format(ts.countTestCases()))\n return ts\n\n def loadTestsFromNames(self, names, *args, **kwargs):\n ts = self.suiteClass()\n for name in names:\n name_suite = self.loadTestsFromName(name, *args, **kwargs)\n ts.addTest(name_suite)\n\n return ts\n\n\nclass TestResult(BaseTestResult):\n \"\"\"\n https://github.com/python/cpython/blob/3.7/Lib/unittest/result.py\n https://github.com/python/cpython/blob/3.7/Lib/unittest/runner.py\n \"\"\"\n total_tests = 0\n\n def _show_status(self, status):\n pyt_start = self._pyt_start\n pyt_stop = time.time()\n self.stream.writeln(\"{} ({}s)\".format(status, round(pyt_stop - pyt_start, 2)))\n\n def startTest(self, test):\n if self.showAll:\n self._pyt_start = time.time()\n self.stream.write(\"{}/{} \".format(\n self.testsRun + 1,\n self.total_tests,\n ))\n self.stream.flush()\n super(TestResult, self).startTest(test)\n\n def addSuccess(self, test):\n orig_show_all = self.showAll\n if self.showAll:\n self._show_status(\"ok\")\n self.showAll = False\n super(TestResult, self).addSuccess(test)\n 
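# The TestSuite.addTest override above silently drops TestCase classes whose
# names start with an underscore, so shared base classes never run on their
# own. A minimal sketch of the intended usage (class names are hypothetical):
#
#   class _BaseClientTest(TestCase):        # filtered out: leading underscore
#       def test_ping(self):
#           self.assertTrue(True)
#
#   class HTTPClientTest(_BaseClientTest):  # collected: runs inherited test_ping
#       pass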
self.showAll = orig_show_all\n\n    def addError(self, test, err):\n        orig_show_all = self.showAll\n        if self.showAll:\n            self._show_status(\"ERROR\")\n        super(TestResult, self).addError(test, err)\n        self.showAll = orig_show_all\n\n    def addFailure(self, test, err):\n        orig_show_all = self.showAll\n        if self.showAll:\n            self._show_status(\"FAIL\")\n        super(TestResult, self).addFailure(test, err)\n        self.showAll = orig_show_all\n\n    def addExpectedFailure(self, test, err):\n        orig_show_all = self.showAll\n        if self.showAll:\n            self._show_status(\"expected failure\")\n        super(TestResult, self).addExpectedFailure(test, err)\n        self.showAll = orig_show_all\n\n    def addUnexpectedSuccess(self, test):\n        orig_show_all = self.showAll\n        if self.showAll:\n            self._show_status(\"unexpected success\")\n        super(TestResult, self).addUnexpectedSuccess(test)\n        self.showAll = orig_show_all\n\n    def _setupStdout(self):\n        super(TestResult, self)._setupStdout()\n        if self.buffer:\n            # Now I realize why I had all that custom stdout/stderr handling\n            # code in the previous version, turns out by default buffer didn't\n            # remove logs when logging had already been messed with, so now I\n            # mess with the loggers and buffer them\n            for r in loghandler_members():\n                ohs = [\n                    (self._original_stdout, self._stdout_buffer),\n                    (self._original_stderr, self._stderr_buffer)\n                ]\n                for oh in ohs:\n                    if r.member is oh[0]:\n                        setattr(r.handler, r.member_name, oh[1])\n\n    def _restoreStdout(self):\n        if self.buffer:\n            for r in loghandler_members():\n                ohs = [\n                    (self._original_stdout, self._stdout_buffer),\n                    (self._original_stderr, self._stderr_buffer)\n                ]\n                for oh in ohs:\n                    if r.member is oh[1]:\n                        setattr(r.handler, r.member_name, oh[0])\n\n        super(TestResult, self)._restoreStdout()\n\n\nclass TestRunner(BaseTestRunner):\n    \"\"\"\n    https://docs.python.org/3/library/unittest.html#unittest.TextTestRunner\n    https://github.com/python/cpython/blob/3.7/Lib/unittest/runner.py\n    \"\"\"\n    resultclass = TestResult\n\n    def _makeResult(self):\n        instance = super(TestRunner, self)._makeResult()\n        instance.total_tests = self.running_test.countTestCases()\n\n        environ = TestEnviron.get_instance()\n        environ.update_env_for_test(instance.total_tests)\n\n        return instance\n\n    def run(self, test):\n        if is_py2:\n            w = test.main.warnings\n            if w:\n                warnings.filterwarnings(\"error\")\n\n        self.running_test = test\n        result = super(TestRunner, self).run(test)\n        self.running_test = None\n\n        if self.verbosity > 1:\n            if len(result.errors) or len(result.failures):\n                with RerunFile() as fp:\n                    count = len(result.errors) + len(result.failures)\n                    self.stream.writeln(\"Failed or errored {} tests:\".format(count))\n                    for testcase, failure in chain(result.errors, result.failures):\n                        tp = testpath(testcase)\n                        self.stream.writeln(tp)\n                        fp.writeln(tp)\n                    self.stream.writeln(\"\")\n\n            if len(result.skipped):\n                self.stream.writeln(\"Skipped {} tests:\".format(len(result.skipped)))\n                for testcase, failure in result.skipped:\n                    self.stream.writeln(testpath(testcase))\n                self.stream.writeln(\"\")\n\n        return result\n\n\nclass TestProgram(BaseTestProgram):\n    \"\"\"\n    https://docs.python.org/3/library/unittest.html#unittest.main\n    https://docs.python.org/2.7/library/unittest.html#unittest.main\n    https://github.com/python/cpython/blob/3.7/Lib/unittest/main.py\n    https://github.com/python/cpython/blob/2.7/Lib/unittest/main.py\n    \"\"\"\n    @property\n    def verbosity(self):\n        return self._verbosity\n\n    @verbosity.setter\n    def verbosity(self, v):\n        self._verbosity = v\n\n        logger_name = __name__.split(\".\")[0]\n        logger = 
logging.getLogger(logger_name)\n if len(logger.handlers) == 0:\n log_handler = logging.StreamHandler(stream=sys.stderr)\n log_formatter = logging.Formatter('[%(levelname).1s] %(message)s')\n log_handler.setFormatter(log_formatter)\n logger.addHandler(log_handler)\n\n if v < 2:\n logger.setLevel(logging.WARNING)\n else:\n logger.setLevel(logging.DEBUG)\n\n def __init__(self, **kwargs):\n kwargs.setdefault('testLoader', TestLoader())\n kwargs.setdefault('testRunner', TestRunner)\n super(TestProgram, self).__init__(**kwargs)\n\n def parseArgs(self, argv):\n #pout.v(argv)\n if is_py2:\n if len(argv) > 1 and argv[1].lower() == 'discover':\n self._do_discovery(argv[2:])\n else:\n parser = self._getParentArgParser()\n parser.parse_args(argv[1:], self)\n self.createTests()\n\n else:\n ret = super(TestProgram, self).parseArgs(argv)\n\n # after parent's parseArgs is ran self.testNames should be set and\n # should contain all the passed in patterns pyt can use to find the\n # tests, but parseArgs() also calls createTests() which uses that\n # information so by the time we get to right here all tests have been\n # created\n #pout.v(self.testNames, self)\n\n def createTests(self, *args, **kwargs):\n # if we didn't pass in any test names then we want to find all tests\n test_names = getattr(self, \"testNames\", [])\n if len(test_names) == 1 and not test_names[0]:\n if self.rerun:\n self.testNames = list(RerunFile())\n\n super(TestProgram, self).createTests(*args, **kwargs)\n\n # we want to keep open the possibility of grabbing values from this\n # later on down the line\n self.test.main = self\n\n def _print_help(self):\n if is_py2:\n try:\n self.usageExit()\n except SystemExit:\n pass\n\n else:\n super(TestProgram, self)._print_help()\n\n def _getMainArgParser(self, parent):\n parser = super(TestProgram, self)._getMainArgParser(parent)\n\n # python3 will trigger discovery if no tests are passed in, so we\n # override that functionality so we get routed to our path guesser\n for action in parser._actions:\n if action.dest == \"tests\":\n action.default = [\"\"]\n #pout.v(parser._positionals)\n return parser\n\n def _getParentArgParser(self):\n from . 
import __version__ # avoid circular dependency\n\n if is_py2:\n # so python 2.7 unittest uses optparse, which makes it so you can't\n # specify flags in any position, so we basically are going to build\n # a shadow argparser and bypass 2.7's opt parser so we can be a bit\n # more flexible\n parser = argparse.ArgumentParser()\n parser.prog = self.progName\n parser.print_help = self._print_help\n parser.add_argument(\n '-v', '--verbose',\n dest='verbosity',\n action='store_const',\n const=2,\n help='Verbose output'\n )\n parser.add_argument(\n '-q', '--quiet',\n dest='verbosity',\n action='store_const',\n const=0,\n help='Quiet output'\n )\n parser.add_argument(\n '-f', '--failfast',\n dest='failfast',\n action='store_true',\n help='Stop on first fail or error'\n )\n parser.add_argument(\n '-c', '--catch',\n dest='catchbreak',\n action='store_true',\n help='Catch Ctrl-C and display results so far'\n )\n parser.add_argument(\n '-b', '--buffer',\n dest='buffer',\n action='store_true',\n help='Buffer stdout and stderr during tests'\n )\n parser.add_argument(\n 'testNames',\n metavar='tests',\n default=[\"\"],\n #dest='testNames',\n nargs='*',\n help='a list of any number of test modules, classes and test methods.'\n )\n\n else:\n parser = super(TestProgram, self)._getParentArgParser()\n\n parser.add_argument(\n \"--version\", \"-V\",\n action='version',\n version=\"%(prog)s {}, Python {} ({})\".format(\n __version__,\n platform.python_version(),\n sys.executable\n )\n )\n\n # https://docs.python.org/2/library/warnings.html\n parser.add_argument(\n '--warnings', \"--warning\", \"-w\", \"-W\",\n dest='warnings',\n action='store_const',\n const=\"error\",\n default=\"\",\n help='Converts warnings into errors'\n )\n\n parser.add_argument(\n '--debug', '-d',\n dest='verbosity',\n action='store_const',\n const=2,\n help='Verbose output'\n )\n\n parser.add_argument(\n '--rerun',\n action='store_true',\n help='Rerun previously failed tests'\n )\n\n return parser\n\n\nmain = TestProgram\n\n","repo_name":"Jaymon/pyt","sub_path":"pyt/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":14620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5632593099","text":"import numpy as np\r\nfrom enum import Enum\r\n\r\n# State inherits from Enum\r\nclass State(Enum): # Each piece should have its own state. Opponents as well\r\n HOME = 0\r\n GLOBE_SAFE = 1\r\n GLOBE_UNSAFE = 2 # Enemy globe\r\n STAR = 3\r\n END_ZONE = 4\r\n GOAL = 5 # Not needed??\r\n FURTHEST = 6 # If furthest * 1.2 i reward eller noget\r\n DANGER = 7 # In front of enemy (less than 7 away or 13 if on a star)\r\n GLOBAL_SAFE = 8 # When not in danger? 
When 7/13 or more away from nearest chasing opponent\r\n DOUBLE = 9 # When two pieces are on the same field\r\n HUNTING = 10 # When an opponent is less than 7 ahead (13 if on star)\r\n\r\nclass Action(Enum):\r\n MoveDice = 0\r\n MoveFromHome = 1\r\n MoveToGlobe = 2\r\n MoveToEnemyGlobe = 3\r\n MoveToStar = 4\r\n MoveToDanger = 5\r\n MoveFromDanger = 6\r\n MoveToEndZone = 7\r\n MoveToGoal = 8\r\n MoveToDouble = 9\r\n MoveToKill = 10\r\n MoveToSuicide = 11\r\n MoveToHunt = 12\r\n\r\n\r\nGLOBES = np.array([9, 22, 35, 48]) # 1 removed\r\nENEMY_GLOBES = np.array([14, 27, 40])\r\nSTARS = np.array([5, 12, 18, 25, 31, 38, 44, 51])\r\nSTARS_7 = ([5, 18, 31, 44])\r\nSTARS_6 = ([12, 25, 38, 51])\r\n\r\ndef getStates(player_pieces, move_pieces, enemy_pieces): # When getting future state, just add dice to player_pieces\r\n old_furthest_index = -1\r\n old_furthest_pos = 0\r\n states = np.zeros((len(player_pieces), len(State)))\r\n for piece in move_pieces:\r\n piece_pos = player_pieces[piece]\r\n state = np.zeros(len(State))\r\n if piece_pos == 0:\r\n state[State.HOME.value] = True\r\n if piece_pos in GLOBES:\r\n state[State.GLOBE_SAFE.value] = True\r\n if piece_pos in ENEMY_GLOBES:\r\n state[State.GLOBE_UNSAFE.value] = True\r\n if piece_pos in STARS:\r\n state[State.STAR.value] = True\r\n if piece_pos > 51:\r\n state[State.END_ZONE.value] = True\r\n if piece_pos == 57:\r\n state[State.GOAL.value] = True\r\n if piece_pos > old_furthest_pos and piece_pos != 57:\r\n old_furthest_pos = piece_pos\r\n old_furthest_index = piece\r\n if check_danger(piece_pos, enemy_pieces):\r\n state[State.DANGER.value] = True\r\n if np.count_nonzero(player_pieces == piece_pos) > 1 and piece_pos != 0:\r\n state[State.DOUBLE.value] = True\r\n state[State.DANGER.value] = False\r\n # if not (state[State.END_ZONE.value] and state[State.DANGER.value] and state[State.GOAL.value]):\r\n # state[State.GLOBAL_SAFE.value] = True\r\n if not (state[State.END_ZONE.value] and state[State.DANGER.value] and state[State.GOAL.value]):\r\n state[State.GLOBAL_SAFE.value] = True\r\n if check_hunt(piece_pos, enemy_pieces) and piece_pos != 0:\r\n state[State.HUNTING.value] = True\r\n\r\n states[piece] = state\r\n if old_furthest_pos > 0:\r\n states[old_furthest_index, State.FURTHEST.value] = True\r\n return states\r\n\r\n\r\ndef get_possible_actions(states, future_states, move_pieces, player_pieces, enemies, dice):\r\n actions = np.zeros((len(player_pieces), len(Action)))\r\n for piece in move_pieces:\r\n current_pos = player_pieces[piece]\r\n future_pos = current_pos + dice\r\n action_table = np.zeros(len(Action))\r\n if future_pos in STARS:\r\n if check_kill(future_pos, enemies) and future_pos < 52:\r\n action_table[Action.MoveToKill.value] = True\r\n action_table[Action.MoveToStar.value] = True\r\n if future_pos in STARS_6:\r\n future_pos += 6\r\n if future_pos in STARS_7:\r\n future_pos += 7\r\n if current_pos == 0 and dice == 6: # Move from home er special case\r\n future_pos = 1\r\n action_table[Action.MoveFromHome.value] = True\r\n elif current_pos != 0:\r\n if future_pos in GLOBES:\r\n action_table[Action.MoveToGlobe.value] = True\r\n if future_pos in ENEMY_GLOBES:\r\n action_table[Action.MoveToEnemyGlobe.value] = True\r\n #if current_pos + dice in STARS:\r\n # action_table[Action.MoveToStar.value] = True\r\n if check_danger(future_pos, enemies): # and not states[piece, State.DANGER.value]: # Not sure what is best here, keeping it like this to hopefully have an opponent overtake\r\n action_table[Action.MoveToDanger.value] = True\r\n if 
states[piece, State.DANGER.value] and not check_danger(future_pos, enemies):\r\n action_table[Action.MoveFromDanger.value] = True\r\n if future_pos > 51 and current_pos < 52:\r\n action_table[Action.MoveToEndZone.value] = True\r\n action_table[Action.MoveToDanger.value] = False\r\n if states[piece, State.DANGER.value]:\r\n action_table[Action.MoveFromDanger.value] = False\r\n if future_pos == 57:\r\n action_table[Action.MoveToGoal.value] = True\r\n if future_pos in player_pieces and future_pos != 0 and future_pos < 52:\r\n action_table[Action.MoveToDouble.value] = True\r\n if check_suicide(future_pos, enemies) and future_pos < 52:\r\n action_table[Action.MoveToSuicide.value] = True\r\n if check_kill(future_pos, enemies) and future_pos < 52 and not action_table[Action.MoveToSuicide.value]: #For star kill\r\n action_table[Action.MoveToKill.value] = True\r\n # if check_kill(current_pos + dice, enemies):\r\n # action_table[Action.MoveToKill.value] = True\r\n\r\n # if check_suicide(current_pos + dice, enemies):\r\n # action_table[Action.MoveToSuicide.value] = True\r\n if True not in action_table: # If nothing else is possible but the piece is still moveable, move dice\r\n action_table[Action.MoveDice.value] = True\r\n if check_hunt(future_pos, enemies) and not states[piece, State.HUNTING.value]:\r\n action_table[Action.MoveToHunt.value] = True\r\n actions[piece] = action_table\r\n return actions\r\n\r\n\r\ndef check_suicide(future_pos, enemies):\r\n if future_pos > 51:\r\n return False\r\n enemy_idx = 1\r\n #enemy_idx_number = 0\r\n #enemy_idx_list = [0, 1, 2, 3]\r\n #enemy_idx_list.remove(player_idx)\r\n for enemy in enemies:\r\n for enemy_piece_local in enemy:\r\n if enemy_piece_local != 0 and enemy_piece_local < 52:\r\n enemy_piece = (enemy_piece_local + (13 * enemy_idx)) % 52\r\n if future_pos == enemy_piece:\r\n if not enemy_piece == 1 and (enemy_piece in GLOBES or np.count_nonzero(enemy == enemy_piece_local) > 1 or enemy_piece in ENEMY_GLOBES):\r\n return True\r\n # doub_enem = (enemy + (13 * enemy_idx)) % 52\r\n # if enemy_piece in GLOBES or enemy_piece in ENEMY_GLOBES or np\r\n enemy_idx += 1\r\n return False\r\n\r\n\r\ndef check_kill(future_pos, enemies):\r\n enemy_idx = 1\r\n for enemy in enemies:\r\n for enemy_piece_local in enemy:\r\n if enemy_piece_local != 0 and enemy_piece_local < 52:\r\n enemy_piece = (enemy_piece_local + (13 * enemy_idx)) % 52\r\n if future_pos == enemy_piece:\r\n if enemy_piece == 1 or (enemy_piece not in GLOBES and not np.count_nonzero(enemy == enemy_piece_local) > 1 and enemy_piece not in ENEMY_GLOBES):\r\n return True\r\n enemy_idx += 1\r\n return False\r\n\r\n\r\ndef check_danger(piece_pos, enemy_positions):\r\n if piece_pos > 51 or piece_pos in GLOBES:\r\n return False\r\n enemy_idx = 1\r\n for enemy in enemy_positions:\r\n for enemy_piece_local in enemy:\r\n if enemy_piece_local != 0 and enemy_piece_local < 52:\r\n enemy_piece = (enemy_piece_local + (13 * enemy_idx)) % 52\r\n if piece_pos < 7 and enemy_piece > 47:\r\n enemy_piece = enemy_piece - 52\r\n if piece_pos - enemy_piece > 0 and (piece_pos - enemy_piece < 7 or (piece_pos - enemy_piece < 13 and piece_pos in STARS_7) or (piece_pos - enemy_piece < 14 and piece_pos in STARS_6)):\r\n return True\r\n enemy_idx += 1\r\n return False\r\n\r\n\r\ndef check_hunt(piece_pos, enemy_positions):\r\n if piece_pos > 51 or piece_pos == 0:\r\n return False\r\n enemy_idx = 1\r\n for enemy in enemy_positions:\r\n for enemy_piece_local in enemy:\r\n if enemy_piece_local != 0 and enemy_piece_local < 52:\r\n enemy_pos = 
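# Coordinate note for the check_* helpers: every opponent reports its pieces in
# its own local 0-51 frame, so (local + 13 * enemy_idx) % 52 rebases a piece
# into this player's frame before distances are compared (each player's start
# square is offset by 13). Illustrative values, not from a real game:
#
#   enemy_idx = 1
#   local_pos = 40
#   global_pos = (local_pos + 13 * enemy_idx) % 52   # -> 1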
(enemy_piece_local + (13 * enemy_idx)) % 52\r\n if enemy_pos - piece_pos > 0 and (enemy_pos - piece_pos < 7 or (enemy_pos - piece_pos < 13 and enemy_pos in STARS_7) or (enemy_pos - piece_pos < 14 and enemy_pos in STARS_6)):\r\n return True\r\n enemy_idx += 1\r\n return False\r\n\r\n\r\n","repo_name":"BenjaminLonget/AI2-Submission","sub_path":"StateSpace.py","file_name":"StateSpace.py","file_ext":"py","file_size_in_byte":8909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23334008970","text":"\n# https://leetcode.com/problems/longest-palindromic-substring/\n\n\nclass Solution(object):\n\n def longest_palindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n\n if len(s) == 1:\n return s\n\n result_string = \"\"\n\n for index in range(len(s) - 1):\n even = self.helper(s, index, index)\n odd = self.helper(s, index, index + 1)\n\n if len(even) > len(result_string):\n result_string = even\n\n if len(odd) > len(result_string):\n result_string = odd\n\n return result_string\n\n def helper(self, s, left, right):\n\n while left >= 0 and right < len(s) and s[left] == s[right]:\n left = left - 1\n right = right + 1\n\n return s[left + 1:right]\n\n# Inspired by https://leetcode.com/problems/longest-palindromic-substring/discuss/461877/Cleanest-Python-O(n2)-Time-O(1)-Space-Solution\n# was close to solving but didn't know how to manage odd/even palindromes\n","repo_name":"LuisRivera162/Python_Coding_Exercises","sub_path":"String_and_Arrays/longestPalindrome.py","file_name":"longestPalindrome.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11780410555","text":"import math\n\ndef check(queries, board):\n dx = [0, 1, 0, -1]\n dy = [1, 0, -1, 0]\n counter = [queries[2]-queries[0], queries[3]-queries[1], queries[2]-queries[0], queries[3]-queries[1]]\n for i in range(len(queries)):\n queries[i] -= 1\n x = queries[1]\n y = queries[0]\n temp = board[y][x]\n m = temp\n for zx, zy, c in zip(dx, dy, counter):\n for j in range(c):\n board[y][x] = board[y+zy][x+zx]\n m = min(m, board[y+zy][x+zx])\n y += zy\n x += zx\n board[y][x+1] = temp\n return board, m\n \ndef solution(rows, columns, queries):\n board = []\n answer = []\n f = columns\n s = 1\n for i in range(rows):\n board.append([j for j in range(s, f + 1)])\n s += columns\n f += columns\n for q in queries:\n board, m = check(q, board)\n answer.append(m)\n return answer","repo_name":"SeHeon-Park/Study_Algorithm","sub_path":"프로그래머스/행렬 테두리 회전하기/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"35985127476","text":"from color import Color\nimport math\nimport numpy\n\ndef mutate(color):\n #elegir numero random de 0 a 255\n return numpy.random.randint(0,255)\n\ndef is_in_pop(pop, candidate):\n for individual in pop:\n if individual.equals(candidate):\n return True\n return False\n \ndef personalized_mutation(new_population, population, target):\n new_population.sort(key=lambda x: x.getFitness(target), reverse=True)\n k=0\n while (k < math.floor(len(population)/2)) and (population[k].getFitness(target) > 0.5):\n if not is_in_pop(new_population, population[k]):\n new_population.append(population[k])\n k = k + 1\n\ndef uniform_mutation(new_population, population, target):\n old_color : Color\n for individual in population:\n if numpy.random.uniform() < 0.5:\n 
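# Quick usage check for the expand-around-center Solution above, assuming the
# class is used as-is (note its `even`/`odd` variable names are swapped
# relative to the palindrome lengths they actually probe):
#
#   s = Solution()
#   s.longest_palindrome("babad")  # -> "bab" ("aba" is equally valid)
#   s.longest_palindrome("cbbd")   # -> "bb", found by the helper(i, i + 1) probe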
old_color = individual\n            i = 0\n            for i in range(3):\n                if i == 1 and numpy.random.uniform() > 0.5:\n                    individual.red = mutate(individual.red)\n                elif i ==2 and numpy.random.uniform() > 0.5:\n                    individual.green = mutate(individual.green)\n                elif numpy.random.uniform() > 0.5:\n                    individual.blue = mutate(individual.blue)\n        \n        if not is_in_pop(new_population, individual):\n            new_population.append(individual)\n","repo_name":"juandl14/color-mixer","sub_path":"mutation.py","file_name":"mutation.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41350370053","text":"#!/usr/bin/env python3\n\nimport math\n\nimport os\nimport subprocess\nfrom util.ptaout import PTAOutput\n\ndef checkJavaVersion():\n    javaversion = subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)\n    jv = javaversion.decode(\"utf-8\")\n    version_info = jv.split()[2].replace('\"', '')\n    major_version = int(version_info.split('.')[0])\n    if major_version < 16:\n        print(jv)\n        print('Error: JRE version does not meet the minimum requirement, i.e., >= 16')\n        exit()\n\ndef checkConsistency(pta1, pta2):\n    if pta1.app != pta2.app or pta1.analysisName != pta2.analysisName or \\\n        pta1.mayFailCasts != pta2.mayFailCasts or \\\n        pta1.callEdges != pta2.callEdges or pta1.polyCalls != pta2.polyCalls or \\\n        pta1.avgPointsToSize != pta2.avgPointsToSize or \\\n        pta1.speedup != pta2.speedup or \\\n        pta1.csCallEdges != pta2.csCallEdges or \\\n        pta1.csGPts != pta2.csGPts or \\\n        pta1.csLPts != pta2.csLPts or \\\n        pta1.csFPts != pta2.csFPts or \\\n        pta1.sparkreachableMethod != pta2.sparkreachableMethod or pta1.ciocg != pta2.ciocg or \\\n        pta1.cidfa != pta2.cidfa or pta1.csobj != pta2.csobj:\n        return False\n    return True\n\n\ndef merge(pta1, pta2, verbose):\n    if pta1.analysisCompleted():\n        pta1.analysisTime = float(pta1.analysisTime) + float(pta2.analysisTime)\n        pta1.sparkTime += pta2.sparkTime\n        pta1.preAnalysisTime += pta2.preAnalysisTime\n    if verbose and not checkConsistency(pta1, pta2):\n        print('Inconsistent pta outputs:')\n        pta1.dump()\n        print()\n        pta2.dump()\n    return pta1\n\n\n# given a run, build a map of this shape: Map<app, Map<tool, PTAOutput>>\ndef buildApp2Tool2PtaOutputMap(run):\n    ret = {}\n    app2ptas = classifyByAppName(run)\n    for app in app2ptas:\n        ret[app] = buildAnalysisNameToObjMap(app2ptas[app])\n    return ret\n\n\ndef mergeHelper(app2tool2pta1, app2tool2pta2):\n    ret = {}\n    for app in app2tool2pta1:\n        tool2pta1 = app2tool2pta1[app]\n        tool2pta2 = app2tool2pta2[app]\n        ret[app] = {}\n        for tool in tool2pta1:\n            pta1 = tool2pta1[tool]\n            pta2 = tool2pta2[tool]\n            ret[app][tool] = merge(pta1, pta2, True)\n    return ret\n\n\ndef average(lst):\n    return sum(lst) / len(lst)\n\n\n# given a list, output a list of the log2 value of each element\ndef mylog2(mList):\n    for i in range(len(mList)):\n        mList[i] = math.log2(mList[i])\n    return mList\n\n\n# input should be a list of PTAOutput instances.\ndef buildAnalysisNameToObjMap(ptaOutputs):\n    ret = {}\n    for elem in ptaOutputs:\n        ret[elem.analysisName] = elem\n    return ret\n\n\n# input should be a list of PTAOutput instances.\ndef buildAppNameToObjMap(ptaOutputs):\n    ret = {}\n    for elem in ptaOutputs:\n        ret[elem.app] = elem\n    return ret\n\n\n# input should be a list of PTAOutput instances.\ndef classifyByToolName(allPtaOutput):\n    ret = {}\n    for elem in allPtaOutput:\n        if elem.analysisName not in ret:\n            ret[elem.analysisName] = []\n        ret[elem.analysisName].append(elem)\n    return ret\n\n\n# input should be a list of 
PTAOutput instances.\ndef classifyByAppName(ptaOutputs):\n ret = {}\n for elem in ptaOutputs:\n if elem.app not in ret:\n ret[elem.app] = []\n ret[elem.app].append(elem)\n return ret\n\n\n# input should be a list of PTAOutput instances.\ndef classifyByAppName2(ptaOutputs, analysisName):\n ret = {}\n for elem in ptaOutputs:\n if elem.analysisName == analysisName:\n ret[elem.app] = elem\n return ret\n\n\ndef loadPtaOutputs(analysisList, benchmarks, ptaOutputPath):\n allOutput = []\n for r, d, f in os.walk(ptaOutputPath):\n for file in f:\n path = os.path.join(r, file)\n appName = path[path.rfind('/') + 1: path.rfind('_')]\n analysisName = file[file.rfind('_') + 1: -4]\n if appName in benchmarks and analysisName in analysisList:\n ptaOutput = PTAOutput()\n ptaOutput.parsePTAOutput(path)\n allOutput.append(ptaOutput)\n return allOutput\n","repo_name":"QilinPTA/Qilin","sub_path":"artifact/util/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"} +{"seq_id":"2427885887","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 19 13:02:05 2018\n\n@author: B51427\n\"\"\"\nimport tensorflow as tf \nimport numpy as np \nfrom tensorflow.python.tools import inspect_checkpoint as chkp\n\ntf.reset_default_graph() \nimported_meta = tf.train.import_meta_graph(\"model_final.meta\")\n\nwith tf.Session() as sess: \n imported_meta.restore(sess, tf.train.latest_checkpoint('./'))\n print(tf.__version__)\n chkp.print_tensors_in_checkpoint_file(file_name=\"./model_final\", tensor_name='', all_tensors=True)#for tf v1.5#, all_tensor_names=True)\n graph = tf.get_default_graph()\n h_est2 = graph.get_tensor_by_name('hor_estimate:0')\n v_est2 = graph.get_tensor_by_name('ver_estimate:0')\n# print(\"h_est: %.2f, v_est: %.2f\" % (h_est2, v_est2))\n print(h_est2, v_est2)\n # Access saved Variables directly\n print(sess.run('bias:0'))\n # This will print 2, which is the value of bias that we saved\n \n \n # Now, let's access and create placeholders variables and\n # create feed-dict to feed new data\n #graph = tf.get_default_graph()\n w1 = graph.get_tensor_by_name(\"w1:0\")\n w2 = graph.get_tensor_by_name(\"w2:0\")\n w3 = graph.get_tensor_by_name(\"w3:0\")\n feed_dict ={w1:13.0,w2:17.0}\n \n #Now, access the op that you want to run. \n op_to_restore = graph.get_tensor_by_name(\"op_to_restore:0\")\n print(sess.run(w3, feed_dict))\n print(sess.run(op_to_restore,feed_dict))\n #This will print 60 which is calculated ","repo_name":"msaadnawaz/Save_Restore_Tensors","sub_path":"restoreTensors.py","file_name":"restoreTensors.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23357178029","text":"# learning module. 
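# Aside on the checkpoint-restore script above: it assumes a graph was saved
# with tf.train.Saver under the prefix "model_final". A minimal TF1-style
# counterpart sketch consistent with the names it restores (the real checkpoint
# also holds hor_estimate/ver_estimate tensors, which are omitted here):
#
#   import tensorflow as tf
#   w1 = tf.placeholder(tf.float32, name="w1")
#   w2 = tf.placeholder(tf.float32, name="w2")
#   bias = tf.Variable(2.0, name="bias")
#   w3 = tf.add(w1, w2, name="w3")                        # 13 + 17 -> 30
#   op_to_restore = tf.multiply(w3, bias, name="op_to_restore")  # 30 * 2 -> 60
#   saver = tf.train.Saver()
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       saver.save(sess, "./model_final")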
Similar to namespace in c#\n\n# import whole module as an instance (recommended)\nimport Lesson51Module as l\n\ngen = l.MyGen()\n\nfor each in gen:\n print(each)\n\n# import methods from a module\nfrom Lesson51Module import MyGen\n\ngen = MyGen()\n\nfor each in gen:\n print(each)","repo_name":"ArlenZhang1988/PythonLearning","sub_path":"Lesson 51 Import module.py","file_name":"Lesson 51 Import module.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"73569550664","text":"import os, shutil\npath = os.getcwd() + '/Clean_datas/'\ntar = os.getcwd() +\"/clean/clean/\"\ncount = 0\nfor dir in os.listdir(path):\n filenames = sorted(os.listdir(path+dir+'/'), reverse=True, key=lambda x: int(x.split('.')[0]))\n # for filename in filenames:\n # new = str(int(filename.split('.')[0])+count)+'.txt'\n # shutil.copyfile(path+dir+'/'+filename, tar+new)\n # # break\n # count += len(filenames)\n print(len(filenames), dir)\n\n\n# import os, shutil\n# path = os.getcwd() + '/Datas/'\n# tar = os.getcwd() +\"/clean/origin/\"\n# count = 0\n# dirs=['muaban.net', 'rongbay.com', 'muabannhadat.vn', 'dothi.net', 'vndiaoc.com', 'abz.vn', 'batdongsan.vn', 'nhadat.net', 'batdongsan321.com', 'homedy.com', 'alonhadat.com.vn']\n# for dir in dirs:\n# try:\n# filenames = sorted(os.listdir(path+dir+'/'), reverse=True, key=lambda x: int(x.split('.')[0]))\n# for filename in filenames:\n# new = str(int(filename.split('.')[0])+count)+'.html'\n# shutil.copyfile(path+dir+'/'+filename, tar+new)\n# except:\n# filenames = sorted(os.listdir(path+dir+'/bietthulienke/'), reverse=True, key=lambda x: int(x.split('.')[0]))\n# for filename in filenames:\n# new = str(int(filename.split('.')[0])+count)+'.html'\n# shutil.copyfile(path+dir+'/bietthulienke/'+filename, tar+new)\n# # break\n# count += len(filenames)\n# print(len(filenames), dir)\n","repo_name":"Duongkieunga/Real-Estate","sub_path":"Extract/merge_data.py","file_name":"merge_data.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"27447745275","text":"import json\nimport requests\nimport datetime as dt\n\n\n__author__ = \"Tim Taylor \"\n__contributors__ = [\"William Kurkian \"]\n__copyright__ = \"Copyright (c) 2019 Cisco and/or its affiliates.\"\n__license__ = \"Cisco Sample Code License, Version 1.0\"\n\n\nclass WBXTeamsMeetingRoom(object):\n def __init__(self, bot_token, teams_email_address):\n self.teams_email_address = teams_email_address\n self.bot_token = bot_token\n self.__message_json = None\n\n def message_json(self):\n if self.__message_json is None:\n msg = '**Howdy, this is the Cisco Smart Licensing Dashboard Bot. 
What can I do for you?**\\n\\nThese are the things you can do:\\n'\n msg = msg + '* \\'show me the latest status\\' or \\'status\\' or \\'give me a status update\\'\\n' \\\n '* **Account Related:**\\n' \\\n ' * \\'give me a list of account names\\'\\n' \\\n ' * \\'give me a list of virtual accounts\\'\\n' \\\n '* **Licensing:**\\n' \\\n ' * \\'give me an export of licenses\\' or \\'export licenses\\'\\n' \\\n ' * \\'show me license usage\\' or \\'license usage\\' or \\'usage\\'\\n' \\\n ' * \\'show me the architecture mix\\' or \\'architecture mix\\'\\n' \\\n '* **Licensing Issues:**\\n' \\\n ' * **Expired Licenses Info**\\n' \\\n ' * \\'give me a list of expired licenses\\' or \\'expired licenses\\'\\n' \\\n ' * \\'show me licenses that expire in 30 days\\' or \\'expire 30 days\\' or \\'expire 30\\'\\n' \\\n ' * \\'show me licenses that expire in 60 days\\' or \\'expire 60 days\\' or \\'expire 60\\'\\n' \\\n ' * \\'show me licenses that expire in 90 days\\' or \\'expire 90 days\\' or \\'expire 90\\'\\n' \\\n ' * \\'show me licenses that expire in 180 days\\' or \\'expire 180 days\\' or \\'expire 180\\'\\n' \\\n ' * \\'show me licenses with shortages\\' or \\'license shortage list\\'\\n'\n the_dict = {'toPersonEmail': self.teams_email_address,\n 'markdown': msg}\n self.__message_json = json.dumps(the_dict)\n \n return self.__message_json\n\n def roomId_from_response_json(self, input_json):\n room_Id = ''\n if type(input_json) is dict:\n room_Id = input_json['roomId']\n elif type(input_json) is str:\n the_dict = json.loads(input_json)\n room_Id = the_dict['roomId']\n return room_Id\n\n def personId_from_response_json(self, input_json):\n\n person_Id = ''\n items = []\n if type(input_json) is dict:\n items = input_json['items']\n elif type(input_json) is str:\n the_dict = json.loads(input_json)\n items = the_dict['items']\n\n for item in items:\n if item['personEmail'] == self.teams_email_address:\n person_Id = item['personId']\n break\n\n return person_Id\n\n def create_team_room(self):\n post_url = \"https://api.ciscospark.com/v1/messages\"\n\n post_data = self.message_json()\n\n request_response_results = self.post_request(post_url,\n post_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Authorization\": \"Bearer {}\".format(self.bot_token)},\n post_data=post_data)\n\n return request_response_results\n\n def get_room_membership_list(self, roomId):\n get_url = \"https://api.ciscospark.com/v1/memberships?roomId={}\".format(roomId)\n\n request_response_results = self.get_request(get_url,\n get_headers={\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Authorization\": \"Bearer {}\".format(self.bot_token)})\n return request_response_results\n\n def create_teams_room_get_room_people_ids(self):\n\n\n request_response_results = self.create_team_room()\n request_response_is_successful = request_response_results[0]\n\n date_time = dt.datetime.now()\n\n if request_response_is_successful:\n print(\"{}: creating team room was successful\".format(date_time))\n room_Id = self.roomId_from_response_json(request_response_results[1].json())\n\n request_response_results = self.get_room_membership_list(room_Id)\n request_response_is_successful = request_response_results[0]\n\n if request_response_is_successful:\n print('persondId: {}'.format(json.dumps(json.loads(request_response_results[1].text), indent=4)))\n person_Id = self.personId_from_response_json(request_response_results[1].json())\n\n\n return 
{'roomId': room_Id, 'personId': person_Id}\n\n def post_request(self, url, post_headers, post_data=None, post_json=None):\n\n spark_request = None\n if post_data:\n spark_request = requests.post(url,\n data=post_data,\n headers=post_headers)\n elif post_json:\n spark_request = requests.post(url, json=post_json, headers=post_headers)\n else:\n return [False, {\"error_key\": \"No json or data payload\"}]\n\n if spark_request.status_code == 200:\n return [True, spark_request]\n else:\n return [False, {\"error_key\": spark_request.status_code,\n \"response_json_key\": json.loads(spark_request.text)}]\n\n def get_request(self, url, get_headers, post_data=None, post_json=None):\n\n spark_request = None\n spark_request = requests.get(url, headers=get_headers)\n\n if spark_request.status_code == 200:\n return [True, spark_request]\n else:\n return [False, {\"error_key\": spark_request.status_code,\n \"response_json_key\": json.loads(spark_request.text)}]\n\n","repo_name":"CiscoSE/Smart-Licensing-Dashboard","sub_path":"Smart-Licensing-Dashboard-Backend/WBXTeamsMeetingRoom/WBXTeamsMeetingRoom.py","file_name":"WBXTeamsMeetingRoom.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"4253984322","text":"from trade_remedies_client.client import Client\n\n\nclass TransientUser:\n \"\"\"\n A TransientUser object mimics a Django auth User but does not\n persist anywhere. Insetad it is created on the fly by the\n APIUserMiddleware middleware using session data.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.id = None\n self.is_authenticated = True\n self.transient_user = True\n self.organisations = []\n self.representing = []\n self.init_fields(**kwargs)\n\n def init_fields(self, **kwargs):\n \"\"\"\n Init all given kwargs to this model as attributes.\n Failure is acceptable.\n \"\"\"\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except Exception:\n pass\n\n def has_group(self, groups):\n if not isinstance(groups, (list, tuple)):\n groups = [groups]\n return any([grp in self.groups for grp in groups])\n\n def has_perm(self, perms):\n if not isinstance(perms, list):\n perms = [perms]\n return any([prm in self.permissions for prm in perms])\n\n @property\n def organisation(self):\n if self.organisations:\n return self.organisations[0]\n return None\n\n @property\n def representing_ids(self):\n \"\"\"\n Return a list of all represented organisation's ids\n \"\"\"\n try:\n return self._representing\n except AttributeError:\n self._representing = list(set([org[\"id\"] for org in self.representing]))\n return self._representing\n\n def is_representing(self, organisation_id, request=None):\n \"\"\"\n Check if this user is representing a given organisation (by it's id)\n \"\"\"\n if request:\n return Client(request.user.token).is_representing(organisation_id)\n return not organisation_id or (\n self.representing and organisation_id in self.representing_ids\n )\n\n def reload(self, request):\n \"\"\"\n Reload the user from the API\n \"\"\"\n user = Client(request.user.token).get_user(self.id, self.organisation[\"id\"])\n request.session[\"user\"] = user\n request.session.modified = True\n self.init_fields(**user)\n return user\n","repo_name":"uktrade/trade-remedies-public","sub_path":"trade_remedies_public/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} 
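For quick orientation, here is a hypothetical driver for the `WBXTeamsMeetingRoom` helper in the record above; it is not part of the dataset or of the CiscoSE repository. The import path is guessed from the record's `sub_path`, and the token and e-mail values are placeholders:

```python
# Hypothetical usage sketch (not from the repository above).
# The import path mirrors the record's sub_path; adjust as needed.
from WBXTeamsMeetingRoom.WBXTeamsMeetingRoom import WBXTeamsMeetingRoom

BOT_TOKEN = "REPLACE_WITH_BOT_TOKEN"   # placeholder Webex Teams bot token
USER_EMAIL = "user@example.com"        # placeholder address of the person to message

room = WBXTeamsMeetingRoom(BOT_TOKEN, USER_EMAIL)

# Posts the bot's help message as a direct message (creating the 1:1 room),
# then reads the room membership to resolve the addressed user's personId.
ids = room.create_teams_room_get_room_people_ids()
print(ids)  # expected shape: {'roomId': '...', 'personId': '...'}
```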
+{"seq_id":"30782134943","text":"from odoo import exceptions, fields\n\nfrom .common import CommonInvoiceCase\n\n\nclass TestInvoiceServiceAnonymous(CommonInvoiceCase):\n def setUp(self, *args, **kwargs):\n super(TestInvoiceServiceAnonymous, self).setUp(*args, **kwargs)\n self.partner = self.env.ref(\"base.res_partner_2\").copy()\n\n def test_get_invoice_anonymous(self):\n \"\"\"\n Test the get on guest mode (using anonymous user).\n It should not return any result, even if the anonymous user has some\n invoices\n :return:\n \"\"\"\n # Check first without invoice related to the anonymous user\n result = self.service_guest.dispatch(\"search\")\n data = result.get(\"data\", [])\n self.assertFalse(data)\n # Then create a invoice related to the anonymous user\n invoice = self._create_invoice(\n partner=self.backend.anonymous_partner_id, validate=True\n )\n self.assertEqual(invoice.partner_id, self.backend.anonymous_partner_id)\n result = self.service_guest.dispatch(\"search\")\n data = result.get(\"data\", [])\n self.assertFalse(data)\n return\n\n def _make_payment(self, invoice, journal=False, amount=False):\n \"\"\"\n Make payment for given invoice\n :param invoice: account.move recordset\n :param amount: float\n :return: bool\n \"\"\"\n ctx = {\"active_model\": invoice._name, \"active_ids\": invoice.ids}\n wizard_obj = self.register_payments_obj.with_context(**ctx)\n register_payments = wizard_obj.create(\n {\n \"payment_date\": fields.Date.today(),\n \"journal_id\": self.bank_journal_euro.id,\n \"payment_method_id\": self.payment_method_manual_in.id,\n }\n )\n values = {}\n if journal:\n values.update({\"journal_id\": journal.id})\n if amount:\n values.update({\"amount\": amount})\n if values:\n register_payments.write(values)\n register_payments.create_payments()\n\n\nclass TestInvoiceService(CommonInvoiceCase):\n def test_get_invoice_logged(self):\n \"\"\"\n Test the get on a logged user.\n In the first part, the user should have any invoice.\n But to the second, he should have one.\n :return:\n \"\"\"\n # Check first without invoice related to the partner\n result = self.service.dispatch(\"search\")\n data = result.get(\"data\", [])\n self.assertFalse(data)\n # Then create a invoice related to partner\n invoice = self._confirm_and_invoice_sale(self.sale, payment=False)\n self.assertEqual(invoice.partner_id, self.service.partner)\n result = self.service.dispatch(\"search\")\n data = result.get(\"data\", [])\n # As the invoice is not paid, it shouldn't be into the data\n self._check_data_content(data, self.invoice_obj.browse())\n self._make_payment(invoice)\n result = self.service.dispatch(\"search\")\n data = result.get(\"data\", [])\n self._check_data_content(data, invoice)\n return\n\n def test_get_invoice_no_number(self):\n \"\"\"\n Test the get on an invoice without payment_reference (\"number\" into json result)\n :return:\n \"\"\"\n # Check first without invoice related to the partner\n result = self.service.dispatch(\"search\")\n data = result.get(\"data\", [])\n self.assertFalse(data)\n # Then create a invoice related to partner\n invoice = self._confirm_and_invoice_sale(self.sale, payment=False)\n self._make_payment(invoice)\n invoice.write({\"payment_reference\": False})\n result = self.service.dispatch(\"get\", invoice.id)\n data = result.get(\"data\", [])\n self.assertTrue(data.get(\"number\"))\n self.assertFalse(data.get(\"payment_reference\"))\n\n def test_get_invoice_no_date_due(self):\n \"\"\"\n Test the get on an invoice without date_due (\"date_due\" into json result)\n 
:return:\n \"\"\"\n # Check first without invoice related to the partner\n result = self.service.dispatch(\"search\")\n data = result.get(\"data\", [])\n self.assertFalse(data)\n # Then create an invoice related to the partner\n invoice = self._confirm_and_invoice_sale(self.sale, payment=False)\n self._make_payment(invoice)\n invoice.write({\"invoice_date_due\": False})\n result = self.service.dispatch(\"get\", invoice.id)\n data = result.get(\"data\", [])\n self.assertFalse(data.get(\"date_due\"))\n\n def test_get_multi_invoice(self):\n \"\"\"\n Test the get on a logged user.\n Check the search with many invoices\n :return:\n \"\"\"\n sale2 = self.sale.copy()\n sale3 = self.sale.copy()\n sale4 = self.sale.copy()\n invoice1 = self._confirm_and_invoice_sale(self.sale)\n invoice2 = self._confirm_and_invoice_sale(sale2)\n invoice3 = self._confirm_and_invoice_sale(sale3)\n invoice4 = self._confirm_and_invoice_sale(sale4)\n invoices = invoice1 | invoice2 | invoice3 | invoice4\n self.assertEqual(invoice1.partner_id, self.service.partner)\n self.assertEqual(invoice2.partner_id, self.service.partner)\n self.assertEqual(invoice3.partner_id, self.service.partner)\n self.assertEqual(invoice4.partner_id, self.service.partner)\n result = self.service.dispatch(\"search\")\n data = result.get(\"data\", [])\n self._check_data_content(data, invoices)\n return\n\n def test_invoice_get(self):\n \"\"\"\n Test the invoice/get on a logged user.\n Create many invoices to ensure the result will be the one with the given id.\n :return:\n \"\"\"\n sale2 = self.sale.copy()\n sale3 = self.sale.copy()\n sale4 = self.sale.copy()\n invoice1 = self._confirm_and_invoice_sale(self.sale)\n invoice2 = self._confirm_and_invoice_sale(sale2)\n invoice3 = self._confirm_and_invoice_sale(sale3)\n invoice4 = self._confirm_and_invoice_sale(sale4)\n self.assertEqual(invoice1.partner_id, self.service.partner)\n self.assertEqual(invoice2.partner_id, self.service.partner)\n self.assertEqual(invoice3.partner_id, self.service.partner)\n self.assertEqual(invoice4.partner_id, self.service.partner)\n result = self.service.dispatch(\"get\", invoice1.id)\n data = result.get(\"data\", [])\n self._check_data_content([data], invoice1)\n return\n\n def test_invoice_get_not_owner(self):\n \"\"\"\n Test the invoice/get on a logged user.\n For this case, the logged user is not the owner of these invoices.\n So we should have an exception.\n :return:\n \"\"\"\n invoice1 = self._confirm_and_invoice_sale(self.sale)\n self.assertEqual(invoice1.partner_id, self.service.partner)\n # The owner can do a 'get' on it\n self.service.dispatch(\"get\", invoice1.id)\n # Now use another user/partner\n with self.work_on_services(partner=self.partner2) as work:\n self.service = work.component(usage=\"invoices\")\n with self.assertRaises(exceptions.MissingError) as cm:\n self.service.dispatch(\"get\", invoice1.id)\n self.assertIn(\"does not exist\", cm.exception.args[0])\n self.assertIn(str(invoice1.id), cm.exception.args[0])\n return\n","repo_name":"shopinvader/odoo-shopinvader","sub_path":"shopinvader_invoice/tests/test_invoice_service.py","file_name":"test_invoice_service.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"}
+{"seq_id":"14640101221","text":"import sys\nfrom heapq import heappop, heappush\n\ninput = sys.stdin.readline\n\nN = int(input())\nq = []\nfor _ in range(N):\n temp = sorted(list(map(int, input().split())))\n heappush(q, temp)\n# visited = [False] * 
2_000_000_001\n\na, b = heappop(q)\nmin_v, max_v = a, b\nanswer = 0\nwhile q:\n a, b = heappop(q)\n if max_v < a:\n answer += (max_v - min_v)\n min_v = a\n max_v = b\n if a <= max_v < b:\n max_v = b\n\nanswer += (max_v - min_v)\nprint(answer)\n\n\"\"\"\n5\n1 3\n2 5\n3 5\n6 7\n8 9\n\n5\n1 9 \n3 6\n 4 10\n5 20\n2 21\n\"\"\"","repo_name":"hugehoo/problem-solving","sub_path":"2022/2022-06/03JUN 2170.py","file_name":"03JUN 2170.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70673381065","text":"# 8 . Write a Python program to find l.c.m. of two numbers\n# For any two numbers given by the user as an input, we have to calculate and print the l.c.m. of that numbers using python programming language.\n# Case1: If the user inputs the numbers 4 and 6.\n#\n# then the output should be '12'.\n#\n# Case2: If the user inputs the numbers 5 and 7.\n#\n# then the output should be '35'.\n\n\ndef find_l_c_m(num1, num2):\n highest_result = num1 * num2\n lowest_result = max(num1, num2)\n result = highest_result\n\n for num in range(highest_result, lowest_result - 1, - 1):\n if (num % num1 == 0) and (num % num2 == 0) and num < result:\n result = num\n\n print(f\"Result is: {result}\")\n\n\n# Second variant:\ndef find_l_c_m_2(num1, num2):\n max_num = max(num1, num2)\n\n while True:\n if max_num % num1 == 0 and max_num % num2 == 0:\n lcm = max_num\n break\n max_num += 1\n\n print(f\"LCM: {lcm}\")\n\n\nif __name__ == \"__main__\":\n find_l_c_m(4, 6)\n find_l_c_m_2(4, 6)\n","repo_name":"reniboyanova/python_most_common_interview_tasks","sub_path":"interview_task_8.py","file_name":"interview_task_8.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18900437224","text":"\"\"\"\nYou are given the head of a singly linked-list. The list can be represented as:\n\nL0 → L1 → … → Ln - 1 → Ln\nReorder the list to be on the following form:\n\nL0 → Ln → L1 → Ln - 1 → L2 → Ln - 2 → …\nYou may not modify the values in the list's nodes. 
Only nodes themselves may be changed.\n\n\n\nExample 1:\n\n\nInput: head = [1,2,3,4]\nOutput: [1,4,2,3]\nExample 2:\n\n\nInput: head = [1,2,3,4,5]\nOutput: [1,5,2,4,3]\n\n\nConstraints:\n\nThe number of nodes in the list is in the range [1, 5 * 10^4].\n1 <= Node.val <= 1000\n\"\"\"\nfrom typing import Optional\n\nfrom linkedlist.ListNode import ListNode\n\n\nclass ReorderList:\n def reorderList(self, head: Optional[ListNode]) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n if not head or not head.next:\n return\n\n slow, fast = head, head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n\n #now slow is in the middle\n\n #reverse the rest list [4,5,6]==>[6,5,4]\n prev, cur = None, slow\n while cur:\n tmp = cur.next\n cur.next = prev\n prev = cur\n cur = tmp\n\n #merge the two list\n first, second = head, prev\n while second.next:\n tmp = first.next\n first.next = second\n first = tmp\n\n tmp = second.next\n second.next = first\n second = tmp\n\n\n","repo_name":"yangmingxuan/pythonalgorithms","sub_path":"linkedlist/ReorderList.py","file_name":"ReorderList.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73488859785","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport smartsheet as ss\nimport mysql.connector as mc\nimport warnings\nimport pandas as pd\nimport pyautogui\n\nwarnings.filterwarnings(\"ignore\",category=UserWarning)\n\nglobal token,host,user,pwd,dbname,connection,client,tab,sh,isInsert,isUpdate,isDelete\nisInsert=False\nisDelete=False\nisUpdate=False\n# Create your views here.\ndef home(request):\n return render(request,'home.html')\n\ndef display_token(request):\n global token\n token = request.POST[\"token\"]\n if token:\n inf = {'tok': token}\n return render(request, 'result.html', context=inf)\n else:\n return HttpResponse(\"
Token not provided
    \")\n\ndef display_sheet_user(request):\n global client\n client=ss.Smartsheet(token)\n user_profile=client.Users.get_current_user()\n if 'result' in user_profile.to_dict():\n return render(request,\"access_token_error.html\")\n else:\n accountId=user_profile.account.id\n accountName1=user_profile.first_name\n accountName2=user_profile.last_name\n acc={'id':accountId,'fname':accountName1,'lname':accountName2}\n return render(request,\"sheet_user.html\",context=acc)\n \ndef server_details(request):\n return render(request,\"server_details.html\")\n\ndef server_connection(request):\n global host,user,pwd,dbname,connection\n host=request.POST[\"hostname\"]\n user=request.POST[\"username\"]\n pwd=request.POST[\"password\"]\n dbname=request.POST[\"databasename\"]\n try:\n connection=mc.connect(host=host,user=user,password=pwd,db=dbname)\n return render(request,\"success_server_connection_status.html\")\n except mc.errors.DatabaseError as e:\n return render(request,\"failed_server_connection_status.html\")\n \ndef tables(request):\n global sheet\n sheet=client.Sheets.list_sheets(include_all=True)\n sheets={'sheet':[]}\n for j in sheet.data:\n sheets['sheet'].append(j.name)\n cursor=connection.cursor()\n cursor.execute(\"SHOW TABLES\")\n tables = {'table': []}\n for x in cursor:\n tables['table'].append(x[0])\n context={\n 'sheets':sheets,\n 'tables':tables,\n }\n return render(request,\"database_details.html\",context=context)\n\ndef create_sheet(request):\n global selectedTable,workspace\n selectedTable=request.POST[\"nmb\"]\n workspace=client.Workspaces.list_workspaces(include_all=True)\n workspaces={'ws':[]}\n for l in workspace.data:\n workspaces['ws'].append(l.name)\n context={'workspaces':workspaces,'table':selectedTable}\n return render(request, \"wtc.html\",context=context)\n \ndef sheetinworkspace(request):\n global selectedTable,workspace\n space=request.POST[\"workspace\"]\n w_id=''\n for w in workspace.data:\n if w.name==space:\n w_id=w.id\n cursor = connection.cursor()\n query = f\"SHOW COLUMNS FROM {selectedTable}\"\n cursor.execute(query)\n column_title = [column[0] for column in cursor.fetchall()]\n # # to create a sheet\n sheet_specifications = ss.models.Sheet()\n sheet_specifications.name=selectedTable\n prim = True\n for i in column_title:\n col = ss.models.Column()\n col.title=i\n col.type = 'TEXT_NUMBER'\n col.primary=prim\n prim = False\n sheet_specifications.columns.append(col)\n wspace=client.Workspaces.create_sheet_in_workspace(w_id,sheet_specifications)\n cont={'sheet':selectedTable,'workspace':space}\n return render(request,\"wp.html\",context=cont)\n\ndef sheetinsheets(request):\n global selectedTable\n cursor = connection.cursor()\n query = f\"SHOW COLUMNS FROM {selectedTable}\"\n cursor.execute(query)\n column_title = [column[0] for column in cursor.fetchall()]\n # # to create a sheet\n sheet_specifications = ss.models.Sheet()\n sheet_specifications.name=selectedTable\n prim = True\n for i in column_title:\n col = ss.models.Column()\n col.title=i\n col.type = 'TEXT_NUMBER'\n col.primary=prim\n prim = False\n sheet_specifications.columns.append(col)\n res=client.Home.create_sheet(sheet_specifications)\n cont={'sheet':selectedTable}\n return render(request,\"createsheet.html\",context=cont)\n\n\ndef selected(request):\n global tab,sh\n tab=request.POST[\"tab\"]\n sh=request.POST[\"sh\"]\n selected={'table':tab,'sheet':sh}\n return render(request,\"selected.html\",context=selected)\n\ndef insert(a,df,sheet_id,sheet):\n global isInsert\n 
initial_column=df.columns[0]\n sibling_id='' \n row_values = df[df[initial_column] == int(a)].values.tolist()\n sheetLists=[row.cells[0].value for row in sheet.rows]\n if sheetLists:\n closest_match = min(sheetLists, key=lambda x: abs(int(x) - int(a)))\n closest_match=str(closest_match)\n columns=sheet.columns\n for row in sheet.rows:\n if row.cells[0].value==closest_match:\n sibling_id=row.id\n \n cells = []\n col_ids = [col.id for col in columns]\n for i in range(len(col_ids)):\n cell_value = row_values[0][i]\n cell = ss.models.Cell()\n cell.column_id=col_ids[i]\n cell.value=str(cell_value)\n cells.append(cell)\n new_row = ss.models.Row()\n new_row.sibling_id=sibling_id\n new_row.cells = cells\n if int(a)>int(closest_match):\n new_row.above=False\n else:\n new_row.above=True \n client.Sheets.add_rows(sheet_id, [new_row])\n isInsert=True\n else:\n cells = []\n col_ids = [col.id for col in sheet.columns]\n for i in range(len(col_ids)):\n cell_value = row_values[0][i]\n cell = ss.models.Cell()\n cell.column_id=col_ids[i]\n cell.value=str(cell_value)\n cells.append(cell)\n new_row = ss.models.Row()\n new_row.cells = cells\n client.Sheets.add_rows(sheet_id, [new_row])\n isInsert=True\n\ndef delete(b,df,sheet_id,sheet):\n global isDelete\n for row in sheet.rows:\n if (str(row.cells[0].value)) == b:\n client.Sheets.delete_rows(sheet_id,row.id)\n isDelete=True\n\ndef update(c,df,sheet_id,sheet):\n global isUpdate\n columns=sheet.columns\n initial_column=df.columns[0]\n rows=sheet.rows\n row_values = []\n row_id = ''\n col_ids = []\n df_row_values = df[df[initial_column] == int(c)].values.tolist()\n df_row_values2 = [str(i) for i in df_row_values[0]]\n for row in rows:\n if row.cells[0].value == c:\n res = client.Sheets.get_row(sheet_id, row.id)\n row_id = row.id\n for cell in res.cells:\n row_values.append(cell.value)\n col_ids.append(cell.column_id)\n if df_row_values2 != row_values:\n for i,j in zip(range(len(df_row_values2)), range(len(col_ids))):\n new_cell = ss.models.Cell()\n new_row = ss.models.Row()\n new_row.id = row_id\n new_cell.column_id = col_ids[j]\n new_cell.value = df_row_values2[i]\n new_row.cells.append(new_cell)\n client.Sheets.update_rows(sheet_id, [new_row])\n isUpdate=True\n \ndef sync(request):\n global df,sheet_id,sheet,isInsert,isUpdate,isDelete\n query=f\"select * from {tab}\"\n df = pd.read_sql(query,connection)\n df = df.replace(r'^\\s*$', 0, regex=True)\n initial_column = df.columns[0]\n df = df.sort_values(by=initial_column)\n sheets = client.Sheets.list_sheets(include_all=True)\n sheet_id=''\n for sh in sheets.data:\n if sh.name==tab:\n sheet_id=sh.id\n sheet=client.Sheets.get_sheet(sheet_id)\n dfRows = [str(row[0]) for _, row in df.iterrows()]\n sheetrows = [str(rows.cells[0].value) for rows in sheet.rows]\n # tab_rows={'irows':[]}\n # for i in range(len(dfRows)):\n # tab_rows[\"irows\"].append(dfRows[i])\n # sh_rows={'irows':[]}\n # for j in range(len(sheetrows)):\n # sh_rows[\"irows\"].append(sheetrows[j])\n # context={\n # 'tab_rows':tab_rows,\n # 'sh_rows':sh_rows,\n # }\n for x in dfRows:\n if x not in sheetrows:\n insert(x,df,sheet_id,sheet)\n else:\n update(x,df,sheet_id,sheet)\n for y in sheetrows:\n if y not in dfRows:\n delete(y,df,sheet_id,sheet)\n\n if isInsert and isDelete and isUpdate:\n isInsert=False\n isUpdate=False\n isDelete=False\n return render(request,\"riud.html\")\n elif isInsert:\n isInsert=False\n return render(request,\"insert.html\")\n elif isDelete:\n isDelete=False\n return render(request,\"delete.html\")\n elif isUpdate:\n 
isUpdate=False\n return render(request,\"update.html\")\n else:\n return render(request,\"nnu.html\")\n\ndef matching(request):\n global sh,tab\n global connection\n sheets=client.Sheets.list_sheets(include_all=True)\n cursor=connection.cursor()\n query=f\"SHOW COLUMNS FROM {tab}\"\n cursor.execute(query)\n sheet_id=''\n context={'t':tab,'s':sh}\n for v in sheets.data:\n if v.name==sh:\n sheet_id=v.id\n sheet_obj=client.Sheets.get_sheet(sheet_id)\n table_columns=[columns[0] for columns in cursor.fetchall()]\n sheet_columns=[col.title for col in sheet_obj.columns]\n if table_columns==sheet_columns:\n return render(request,\"success_match.html\")\n else:\n return render(request,\"mismatch.html\",context=context)\n\ndef closetab(request):\n pyautogui.hotkey('ctrl','w')\n \n\n \n \n\n\n \n\n","repo_name":"sabrismd/Syncer","sub_path":"formSite/formApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5825350512","text":"\"\"\"\r\nTaiga Asanuma\r\n7-19-2019\r\nVersion 1.0\r\nhttps://github.com/tasanuma714/Raspberry-Pi-Security-Camera-using-Google-Coral-USB-Accelerator\r\n\r\n***Big Credit to Adrian at PyImageSearch for the base code on detectVideoMod.py\r\n\tpython file.\r\n\t\thttps://www.pyimagesearch.com/2019/04/22/getting-started-with-google-corals-tpu-usb-accelerator/\r\n\t\thttps://www.pyimagesearch.com/2019/05/13/object-detection-and-image-classification-with-google-coral-usb-accelerator/\r\n \r\nDescription: This python file uploads the contents of surveillance.txt\r\n\tafter 1 hour and then clears surveillance.txt. This\tpython file will \r\n\tbe called from detectVideoMod.py. You need to be signed into a Google\r\n\tAccount for the program to work.\r\n\t\r\n\"\"\"\r\n\r\n# imports\r\nfrom pydrive.auth import GoogleAuth\r\nfrom pydrive.drive import GoogleDrive\r\n\r\nimport time\r\n\r\n\r\n# function for naming text file with timestamp\r\ndef get_file_name(): \r\n return time.ctime() + \".txt\"\r\n\r\n# reads and clears content of surveillance.txt\r\nfile1 = open(\"surveillance.txt\", \"r\") \r\ncontents = file1.read()\r\nfile1.close()\r\nopen('surveillance.txt', 'w').close()\r\n\r\n# creates a text file with timestamp\r\nfilename2 = get_file_name()\r\nfile2 = open(filename2, \"w\")\r\nfile2.write(contents)\r\nfile2.close()\r\n\r\n# uploads to Google Drive\r\ngauth = GoogleAuth()\r\ndrive = GoogleDrive(gauth)\r\nfile3 = drive.CreateFile()\r\nfile3.SetContentFile(filename2)\r\nfile3.Upload()\r\n","repo_name":"tasanuma714/Raspberry-Pi-Security-Camera-using-Google-Coral-USB-Accelerator","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"13608666537","text":"import logging\nimport itertools\nfrom functools import partial\n\nfrom django.http import HttpResponse\nfrom django.utils.six import string_types\n\nfrom eoxserver.core import Component, ExtensionPoint, env\nfrom eoxserver.services.ows.interfaces import *\nfrom eoxserver.services.ows.decoders import get_decoder\nfrom eoxserver.services.exceptions import (\n ServiceNotSupportedException, VersionNotSupportedException,\n VersionNegotiationException, OperationNotSupportedException,\n HTTPMethodNotAllowedError,\n)\nfrom eoxserver.services.ows.common.v20.exceptionhandler import (\n OWS20ExceptionHandler\n)\n\n\nlogger = 
logging.getLogger(__name__)\n\nALLOWED_HTTP_METHODS = [\"GET\", \"POST\", \"OPTIONS\"]\n\nclass OptionsRequestHandler(object):\n \"\"\" Dummy request handler class to respond to HTTP OPTIONS requests.\n \"\"\"\n def handle(self, request):\n\n def add_required_headers(headers, required_headers):\n \"\"\" Make sure the required headers are included in the list. \"\"\"\n headers_lc = set(header.lower() for header in headers)\n for required_header in required_headers:\n if required_header.lower() not in headers_lc:\n headers.append(required_header)\n return headers\n\n # return an empty 200 response\n response = HttpResponse()\n response[\"Access-Control-Allow-Methods\"] = \", \".join(\n ALLOWED_HTTP_METHODS\n )\n headers = [\n header.strip() for header in\n request.META.get(\"HTTP_ACCESS_CONTROL_REQUEST_HEADERS\", \"\").split(\",\")\n if header\n ]\n headers = add_required_headers(headers, ['Content-Type'])\n response[\"Access-Control-Allow-Headers\"] = \", \".join(headers)\n return response\n\n\nclass ServiceComponent(Component):\n service_handlers = ExtensionPoint(ServiceHandlerInterface)\n exception_handlers = ExtensionPoint(ExceptionHandlerInterface)\n\n get_service_handlers = ExtensionPoint(GetServiceHandlerInterface)\n post_service_handlers = ExtensionPoint(PostServiceHandlerInterface)\n\n version_negotiation_handlers = ExtensionPoint(VersionNegotiationInterface)\n\n def __init__(self, *args, **kwargs):\n super(ServiceComponent, self).__init__(*args, **kwargs)\n\n def query_service_handler(self, request):\n \"\"\" Tries to find the correct service handler for a given request. The\n request ``method`` can either be \"POST\" (in which case the request body\n is parsed as XML) or \"GET\" (in which case the request is parsed\n as \"KVP\").\n\n If necessary a version negotiation is conducted, following OWS\n guidelines.\n\n :param request: a :class:`Django HttpRequest `\n object\n :returns: the request handler component for the given request\n :raises ServiceNotSupportedException: if the service is not supported\n by any component\n :raises VersionNotSupportedException: if the specified version is not\n supported\n :raises OperationNotSupportedException: if the specified request\n operation is not supported\n \"\"\"\n\n decoder = get_decoder(request)\n\n\n if request.method == \"GET\":\n handlers = self.get_service_handlers\n elif request.method == \"POST\":\n handlers = self.post_service_handlers\n elif request.method == \"OPTIONS\":\n return OptionsRequestHandler()\n else:\n raise HTTPMethodNotAllowedError(\n \"The %s HTTP method is not allowed!\" % request.method,\n ALLOWED_HTTP_METHODS\n )\n #handlers = self.service_handlers\n\n version = decoder.version\n if version is None:\n accepted_versions = decoder.acceptversions\n handlers = filter_handlers(\n handlers, decoder.service, accepted_versions, decoder.request\n )\n return self.version_negotiation(handlers, accepted_versions)\n\n # check that the service is supported\n handlers = filter(\n partial(handler_supports_service, service=decoder.service), handlers\n )\n if not handlers:\n raise ServiceNotSupportedException(decoder.service)\n\n # check that the required version is enabled\n handlers_ = filter(\n lambda h: decoder.version in h.versions, handlers\n )\n if not handlers_:\n # old style version negotiation shall always return capabilities\n if decoder.request == \"GETCAPABILITIES\":\n handlers = [sorted(\n filter(\n lambda h: decoder.request == h.request.upper(), handlers\n ), key=lambda h: max(h.versions), reverse=True\n )[0]]\n 
else:\n raise VersionNotSupportedException(\n decoder.service, decoder.version\n )\n else:\n handlers = handlers_\n\n # check that the required operation is supported and sort by the highest\n # version supported in descending manner\n handlers = sorted(\n filter(\n lambda h: decoder.request == h.request.upper(), handlers\n ), key=lambda h: max(h.versions), reverse=True\n )\n\n if not handlers:\n operation = decoder.request\n raise OperationNotSupportedException(\n \"Operation '%s' is not supported.\" % operation, operation\n )\n\n # return the handler with the highest version\n logger.debug(\"Handling '%s' request for '%s' service version '%s'.\" %\n (handlers[0].request, handlers[0].service,\n handlers[0].versions[0]))\n return handlers[0]\n\n def query_service_handlers(self, service=None, versions=None, request=None,\n method=None):\n \"\"\" Query the service handler components, filtering optionally by\n ``service``, ``versions``, ``request`` or ``method``.\n \"\"\"\n method = method.upper() if method is not None else None\n\n if method == \"GET\":\n handlers = self.get_service_handlers\n elif method == \"POST\":\n handlers = self.post_service_handlers\n elif method is None:\n handlers = self.service_handlers\n else:\n return []\n\n handlers = filter_handlers(handlers, service, versions, request)\n return sort_handlers(handlers)\n\n def query_exception_handler(self, request):\n try:\n decoder = get_decoder(request)\n handlers = self.exception_handlers\n handlers = sorted(\n filter(\n partial(handler_supports_service, service=decoder.service),\n self.exception_handlers\n ),\n key=lambda h: max(h.versions), reverse=True\n )\n\n # try to get the correctly versioned exception handler\n if decoder.version:\n for handler in handlers:\n if decoder.version in handler.versions:\n return handler\n else:\n # return the exception handler with the highest version,\n # if one is available\n return handlers[0]\n except:\n # swallow any exception here, because we *really* need a handler\n # to correctly show the exception.\n pass\n\n # last resort fallback is a plain OWS exception handler\n return OWS20ExceptionHandler()\n\n def version_negotiation(self, handlers, accepted_versions=None):\n version_to_handler = {}\n for handler in handlers:\n for version in handler.versions:\n version_to_handler.setdefault(version, handler)\n\n available_versions = sorted(version_to_handler.keys(), reverse=True)\n if not available_versions:\n raise VersionNegotiationException()\n\n if not accepted_versions:\n return version_to_handler[available_versions[0]]\n\n combinations = itertools.product(accepted_versions, available_versions)\n for accepted_version, available_version in combinations:\n if accepted_version == available_version:\n return version_to_handler[available_version]\n\n raise VersionNegotiationException()\n\n\ndef filter_handlers(handlers, service=None, versions=None, request=None):\n \"\"\" Utility function to filter the given OWS service handlers by their\n attributes 'service', 'versions' and 'request'.\n \"\"\"\n\n service = service.upper() if service is not None else None\n request = request.upper() if request is not None else None\n\n if service:\n handlers = filter(\n partial(handler_supports_service, service=service), handlers\n )\n\n if request:\n handlers = filter(lambda h: h.request.upper() == request, handlers)\n\n if versions:\n handlers = [\n handler for handler in handlers\n if any(version in handler.versions for version in versions)\n ]\n\n return handlers\n\n\ndef sort_handlers(handlers, 
ascending=True):\n return sorted(\n handlers, key=lambda h: getattr(h, \"index\", 100000),\n reverse=not ascending\n )\n\n\ndef handler_supports_service(handler, service=None):\n \"\"\" Convenience method to check whether or not a handler supports a service.\n \"\"\"\n if isinstance(handler.service, string_types):\n return handler.service.upper() == service\n else:\n return service in handler.service\n","repo_name":"EOxServer/eoxserver","sub_path":"eoxserver/services/ows/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":9721,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"81"} +{"seq_id":"16067060058","text":"tournaments = int(input())\n\nmoney_for_charity = 0\ntotal_win_days = 0\ntotal_lose_days = 0\n\nfor day in range(tournaments):\n daily_money = 0\n daily_wins = 0\n daily_loses = 0\n while True:\n command = input()\n if command == \"Finish\":\n break\n else:\n sport = command\n result = input()\n if result == \"win\":\n daily_money += 20\n daily_wins += 1\n elif result == \"lose\":\n daily_loses += 1\n if daily_wins > daily_loses:\n daily_money *= 1.1\n total_win_days += 1\n else:\n total_lose_days += 1\n money_for_charity += daily_money\n\nif total_win_days > total_lose_days:\n money_for_charity *= 1.2\n print(f'You won the tournament! Total raised money: {money_for_charity:.2f}')\nelse:\n print(f'You lost the tournament! Total raised money: {money_for_charity:.2f}')","repo_name":"VelinIliev/python-basic-softuni","sub_path":"26-programming_basics_exam-28-29Mar2020/06.-tournament_of_christmas.py","file_name":"06.-tournament_of_christmas.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5963129621","text":"# coding=utf-8\n\nimport os\nimport csv\n\nimport baostock as bs\nimport pandas as pd\n\n\ndef convert_symbol(symbol):\n parts = symbol.lower().split('.')\n\n return '{}.{}'.format(parts[1], parts[0])\n\n\ndef load_adjfactor(symbol, data_path):\n adj_file = os.path.join(data_path, symbol, 'adj.csv')\n\n if os.path.exists(adj_file):\n return load_adjfactor_from_file(adj_file)\n\n lg = bs.login('anonymous', '123456')\n\n if lg.error_code != '0':\n raise ValueError('baostock login failed:{}, {}'.format(lg.error_code, lg.error_msg))\n\n rs_list = []\n rs_factor = bs.query_adjust_factor(convert_symbol(symbol), start_date=\"1990-01-01\", end_date=\"2199-12-31\")\n while (rs_factor.error_code == '0') & rs_factor.next():\n rs_list.append(rs_factor.get_row_data())\n\n if rs_factor.error_code != '0':\n bs.logout()\n raise ValueError('baostock read data failed:{}'.format(rs_factor.error_msg))\n\n bs.logout()\n\n result_factor = pd.DataFrame(rs_list, columns=rs_factor.fields)\n\n result_factor.to_csv(adj_file, encoding=\"utf-8\", index=False)\n\n return load_adjfactor_from_file(adj_file)\n\n\ndef load_adjfactor_from_file(adj_file):\n values = {}\n\n with open(adj_file, 'r') as f:\n reader = csv.DictReader(f)\n for line in reader:\n values[pd.to_datetime(line['dividOperateDate'])] = float(line['backAdjustFactor'])\n # end for\n # end with\n\n return values\n\n\ndef get_adjv_for_date(values, d):\n dates = sorted(values.keys())\n\n try:\n for i in range(len(dates)):\n if d < dates[i]:\n return values[dates[i - 1]] if i > 0 else values[dates[0]]\n\n return values[dates[-1]]\n except:\n raise KeyError\n\nif __name__ == '__main__':\n values = load_adjfactor('600019.SH', 'data')\n 
print(values)\n","repo_name":"stonewell/learn-curve","sub_path":"src/stock_data_provider/cn_a/baostock_adjfactor.py","file_name":"baostock_adjfactor.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36643653019","text":"import requests\r\nimport bs4\r\n\r\nurl = \"https://en.wikipedia.org/wiki/Apex_Legends\"\r\nreq_html = requests.get(url).text\r\n\r\nif req_html is not None:\r\n soup = bs4.BeautifulSoup(req_html,\"html.parser\")\r\n soup = soup.find(\"div\",{\"class\":\"mw-parser-output\"})\r\n \r\n for p in soup.find_all(\"p\"):\r\n current_text = p.text.strip()\r\n print(current_text)\r\n ","repo_name":"OliverWangData/References","sub_path":"wikipediaScrape.py","file_name":"wikipediaScrape.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24576624966","text":"import example.sns.preprocess # noqa F401\n\nfrom unittest import TestCase, mock\n\nfrom lf3py.aws.symbols import IFireHose\nfrom lf3py.test.helper import data_provider\n\n\nclass TestHandler(TestCase):\n ADD_MODULES = {\n 'lf3py.aws.symbols.IFireHose': 'tests.e2e.example.sns.test_handler.MockFireHose',\n }\n\n @data_provider([\n (\n {\n 'Records': [\n {\n 'TopicArn': 'dev_ping_topic',\n 'Subject': 'ping',\n 'Message': '',\n 'MessageAttributes': {},\n },\n ],\n },\n {\n 'topic': 'dev_ping_topic',\n 'subject': 'ping',\n 'message': 'pong',\n },\n ),\n ])\n def test_ping(self, event: dict, expected: dict):\n with mock.patch('example.sns.modules.add_modules', return_value=self.ADD_MODULES):\n with mock.patch('tests.e2e.example.sns.test_handler.MockFireHose.put') as p:\n from example.sns.handler import handler\n\n handler(event, object())\n p.assert_called_with(expected)\n\n @data_provider([\n (\n {\n 'Records': [\n {\n 'TopicArn': 'notice_topic',\n 'Subject': 'hoge',\n 'Message': 'fuga',\n 'MessageAttributes': {\n 'piyo': {\n 'Type': 'String',\n 'Value': 'hoge.fuga.piyo',\n }\n },\n },\n ],\n },\n {\n 'topic': 'notice_topic',\n 'subject': 'hoge',\n 'message': 'fuga',\n 'values': {'piyo': 'hoge.fuga.piyo'},\n },\n ),\n ])\n def test_notice(self, event: dict, expected: dict):\n with mock.patch('example.sns.modules.add_modules', return_value=self.ADD_MODULES):\n with mock.patch('tests.e2e.example.sns.test_handler.MockFireHose.put') as p:\n from example.sns.handler import handler\n\n handler(event, object())\n p.assert_called_with(expected)\n\n\nclass MockFireHose(IFireHose):\n def __init__(self, delivery_stream_name: str = '') -> None:\n pass\n\n def put(self, payload: dict):\n pass\n","repo_name":"rog-works/lf3py","sub_path":"tests/e2e/example/sns/test_handler.py","file_name":"test_handler.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35845355167","text":"from collections import deque\n\nnumbers = [1, 1, 1, 1, 1]\n\ntarget = 3\n\n\n# 1. 
using a queue\n'''\nPass (858.60ms, 116MB)\nTest 2 〉\tPass (853.50ms, 116MB)\nTest 3 〉\tPass (0.77ms, 10.2MB)\nTest 4 〉\tPass (2.60ms, 10.6MB)\nTest 5 〉\tPass (17.49ms, 13.2MB)\nTest 6 〉\tPass (1.39ms, 10.3MB)\nTest 7 〉\tPass (0.79ms, 10.3MB)\nTest 8 〉\tPass (5.07ms, 10.7MB)\n'''\n#\n# def solution(numbers, target):\n# answer = 0\n# queue = deque()\n# n = len(numbers)\n# queue.append([numbers[0], 0])\n# queue.append([-1 * numbers[0], 0])\n# while queue:\n# print(queue)\n# temp, idx = queue.popleft()\n# idx += 1\n# if idx < n:\n# queue.append([temp + numbers[idx], idx])\n# queue.append([temp - numbers[idx], idx])\n# else:\n# if temp == target:\n# answer += 1\n# return answer\n\n# print(solution(numbers, target))\n\n# 2. using a stack\n''' timing comparison\nPass (419.24ms, 10.3MB)\nTest 2 〉\tPass (419.19ms, 10.3MB)\nTest 3 〉\tPass (0.76ms, 9.98MB)\nTest 4 〉\tPass (2.22ms, 10.2MB)\nTest 5 〉\tPass (13.24ms, 10.2MB)\nTest 6 〉\tPass (1.33ms, 10.2MB)\nTest 7 〉\tPass (0.70ms, 10.3MB)\nTest 8 〉\tPass (3.68ms, 10.3MB)\n'''\ndef solution(numbers, target):\n answer = 0\n stack = []\n stack.append([numbers[0], 0])\n stack.append([-1 * numbers[0], 0])\n n = len(numbers)\n while stack:\n print(stack)\n temp, idx = stack.pop()\n idx += 1\n if idx < n:\n stack.append([temp + numbers[idx], idx])\n stack.append([temp - numbers[idx], idx])\n else:\n if temp == target:\n answer += 1\n\n return answer\n\n\nprint(solution(numbers, target))\n","repo_name":"MartinPSE/Python","sub_path":"AlgoPrac/3_Week_BFS_DFS/Quiz/TargetNumber.py","file_name":"TargetNumber.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"18122351868","text":"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\n\"\"\"\nAuthor:zhengpanone\nEmail:zhengpanone@hotmail.com\ndate:2019/11/21 11:06\n\"\"\"\n\n# import lib\nfrom wtforms import StringField, IntegerField\nfrom wtforms.validators import DataRequired\n\nfrom pm_cms.validators.base import BaseForm\nfrom pm_cms.libs.enums import IsOutSourceEnum, SeqPlatformEnum\n\n\nclass PoolingForm(BaseForm):\n pooling_name = StringField(validators=[DataRequired(message=\"pooling sheet cannot be empty\")])\n is_outsource = IntegerField(validators=[DataRequired(message=\"outsourced is 1, not outsourced is 0\")])\n seq_platform = IntegerField(validators=[DataRequired(message=\"sequencing platform: Novaseq is 1, Miseq is 2, Xten is 3\")])\n\n def validate_is_outsource(self, value):\n try:\n IsOutSourceEnum(value.data)\n except ValueError as e:\n raise e\n self.is_outsource.data = value.data\n\n def validate_seq_platform(self, value):\n try:\n SeqPlatformEnum(value.data)\n except ValueError as e:\n raise e\n self.seq_platform.data = value.data\n","repo_name":"zhengpanone/flask_web","sub_path":"PM_CMS/pm_cms/validators/pooling_form.py","file_name":"pooling_form.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"71286792265","text":"from tpp.Semantic.Checker import *\nfrom tpp.Semantic.simplify_tree import *\n\n\ndef semantic_preprocessor(root):\n type_map = {\n \"flutuante\": T.FLOAT,\n \"inteiro\": T.INTEGER,\n \"texto\": T.TEXT,\n \"vazio\": T.VOID,\n }\n assignment_map = {\n \"ATRIBUICAO\": A.INITIALIZE,\n \"ADICAO_ATRIBUICAO\": A.ADD,\n \"SUBTRACAO_ATRIBUICAO\": A.SUBTRACT,\n \"MULTIPLICACAO_ATRIBUICAO\": A.MULTIPLY,\n \"DIVISAO_ATRIBUICAO\": A.DIVIDE,\n }\n operation_map = {\n \"ADICAO\": O.ADD,\n \"SUBTRACAO\": O.SUBTRACT,\n \"MULTIPLICACAO\": O.MULTIPLY,\n \"DIVISAO\": O.DIVIDE,\n \"MAIOR\": O.GRANTER,\n \"MENOR\": O.LESS,\n 
\"MAIORIGUAL\": O.GRANTER_EQUAL,\n \"MENORIGUAL\": O.LESS_EQUAL,\n \"IGUAL\": O.EQUAL,\n \"DIFERENTE\": O.DIFFERENT,\n \"NEGACAO\": O.NEGATE,\n \"E_LOGICO\": O.AND,\n \"OU_LOGICO\": O.OR,\n }\n\n def rec(node: Tree):\n if node.identifier == \"programa\":\n # Extract values\n program_list = node.children[0]\n\n # Transform values\n declarations = [rec(c) for c in program_list.children]\n\n # Construct declaration\n return Program(declarations)\n\n if node.identifier == \"funcao_declaracao\":\n # Extract values\n return_type, header, body = node.children\n name, parameters = header.children\n\n # Transform values\n return_type = type_map[return_type.value]\n name = name.value\n\n ps = parameters.children\n # parameters: [Tree] -> Gen<[Tree]> -> Gen<(string, LiteralVariable)> -> [Variable]\n parameters = [] if len(ps) == 1 and ps[0].is_leaf() else ps\n parameters = (p.children for p in parameters)\n parameters = ((type_map[cs[0].value], rec(cs[1])) for cs in parameters)\n parameters = [Variable(t, v.name, v.indexes, True) for t, v in parameters]\n body = [rec(c) for c in body.children]\n\n # Construct declaration\n return FunctionDeclaration(return_type, name, parameters, body)\n if node.identifier == \"criacao_de_variaveis_declaracao\":\n # Extract values\n typing, variables = node.children\n\n # Transform values\n typing = type_map[typing.value]\n\n # variables: Gen<[Tree]> -> Gen<(string, [Tree])> -> [Variable]\n variables = (v.children for v in variables.children)\n variables = ((cs[0].value, list(map(rec, cs[1:]))) for cs in variables)\n variables = [\n Variable(typing, name, indexes, bool(indexes))\n for name, indexes in variables\n ]\n\n # Construct declaration\n return VarsDeclaration(variables)\n if node.identifier == \"atribuicao_declaracao\":\n # Extract values\n variable, assignment, expression = node.children\n\n # Transform values\n variable = rec(variable)\n assignment = assignment_map[assignment.identifier]\n expression = rec(expression)\n\n # Construct declaration\n return AssignmentDeclaration(variable, assignment, expression)\n if node.identifier == \"se_declaracao\":\n # Extract values\n if len(node.children) == 3:\n if_expression, if_body, else_body = node.children\n else_body = else_body.children\n else:\n if_expression, if_body = node.children\n else_body = []\n\n # Transform values\n if_expression = rec(if_expression)\n if_body = [rec(c) for c in if_body.children]\n else_body = [rec(c) for c in else_body]\n\n # Construct declaration\n return IfElseDeclaration(if_expression, if_body, else_body)\n if node.identifier == \"repita_declaracao\":\n # Extract values\n body, expression = node.children\n\n # Transform values\n body = [rec(c) for c in body.children]\n expression = rec(expression)\n\n # Construct declaration\n return RepeatDeclaration(body, expression)\n if node.identifier == \"retorna_declaracao\":\n if node.children:\n # Extract values\n expression = node.children[0]\n\n # Transform values\n expression = rec(expression)\n else:\n expression = None\n\n # Construct declaration\n return ReturnDeclaration(expression)\n\n if node.identifier == \"escreva\":\n return Write(rec(node.children[0]))\n if node.identifier == \"leia\":\n return Read(rec(node.children[0]))\n\n if node.identifier == \"expressao_unaria\":\n # Extract values\n operation, expression = node.children\n\n # Transform values\n operation = operation_map[operation.identifier]\n expression = rec(expression)\n if expression.t in [S.LITERAL_INTEGER, S.LITERAL_FLOAT]:\n expression.value = 
-expression.value\n return expression\n\n # Construct declaration\n return UnaryExpressionLazy(operation, expression)\n if node.identifier == \"expression\":\n # Extract values\n first, operation, second = node.children\n\n # Transform values\n operation = operation_map[operation.identifier]\n first = rec(first)\n second = rec(second)\n\n # Construct declaration\n return BinaryExpressionLazy(operation, first, second)\n\n if node.identifier == \"ponteiro\":\n return Pointer()\n if node.identifier == \"vetor\":\n return rec(node.children[0])\n if node.identifier == \"var\":\n # Extract values\n identifier, *indexes = node.children\n\n # Transform values\n indexes = [rec(c) for c in indexes]\n\n # Construct declaration\n return LiteralVariableLazy(identifier.value, indexes)\n if node.identifier == \"chamada_de_funcao_declaracao\":\n # Extract values\n name, parameters = node.children\n\n # Transform values\n name = name.value\n parameters = [rec(p) for p in parameters.children]\n\n # Construct declaration\n return FunctionCallLazy(name, parameters)\n\n if node.identifier == \"NUMERO_CIENTIFICO\":\n return LiteralFloat(float(node.value))\n if node.identifier == \"NUMERO_FLUTUANTE\":\n return LiteralFloat(float(node.value))\n if node.identifier == \"NUMERO_INTEIRO\":\n return LiteralInteger(int(node.value))\n if node.identifier == \"ID\":\n return LiteralVariableLazy(node.value, [])\n if node.identifier == \"CARACTERES\":\n return LiteralCharacters(node.value)\n if node.identifier == \"vazio\":\n return Empty()\n\n print()\n print(node.str_tree())\n print()\n print(node, node.children, node._value)\n print()\n raise Exception(\"Unimplemented\")\n\n return None if root is None else rec(root)\n\n\ndef semantic_check(root):\n if root is not None:\n root = simplify_tree(root)\n root = semantic_preprocessor(root)\n return SemanticChecker().check(root)\n","repo_name":"danielbom/tpp-compiler.py","sub_path":"tpp/Semantic/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10549592982","text":"from bs4 import BeautifulSoup\nimport requests\nfrom DataInsertion.database import insertProduct\n\ndef categoryPigiame():\n\n site = 'https://www.pigiame.co.ke'\n page_response = requests.get(site, headers={'User-Agent': 'Mozilla/5.0'})\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n\n category = page_content.find('div',{\"class\":\"home-categories\"}).findAll(\"a\",{\"class\":\"home-category__header\"})\n categories_urls = []\n\n for item in category:\n urlCategory = item.get(\"href\")\n\n categories_urls.append(\n urlCategory\n )\n\n return categories_urls\n\n#print(categoryPigiame())\n\ndef subCategoryPigiame():\n categories_urls = categoryPigiame()\n subUrl = []\n\n for el in categories_urls:\n page_response = requests.get(el, headers={'User-Agent': 'Mozilla/5.0'})\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n\n subCategories = page_content.find('ul', {\"class\": \"filter__category-list--level-2\"}).findAll('li',{\"class\":\"filter__category-list-item--has-content\"})\n\n for item in subCategories:\n subCategoryUrl = item.find('a').get(\"href\")\n\n subUrl.append(\n subCategoryUrl\n )\n\n return subUrl\n\n#print(subCategoryPigiame())\n\n\ndef getAllPage():\n subUrl = subCategoryPigiame()\n page = []\n maxPage = 15\n id = list(range(maxPage))\n del id[0]\n for url in subUrl:\n for item in id:\n link = url + \"?p=\" + 
str(item)\n page.append({\n 'url': link\n })\n return page\n\n#print(getAllPage())\n\n\ndef scrapPigiame(origin):\n site = 'https://www.pigiame.co.ke'\n page = getAllPage()\n produits = []\n\n for link in page:\n page_response = requests.get(link[\"url\"], headers={'User-Agent': 'Mozilla/5.0'})\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n\n logo = ''\n logoS = ''\n annonce = page_content.find_all(\"div\", {\"class\":\"listing-card--has-content\"})\n\n for item in annonce:\n try:\n url = item.find('a', {\"class\": \"listing-card__inner\"}).get(\"href\")\n lib = item.find(\"div\", {\"class\": \"listing-card__header__title\"}).text.strip()\n img = item.find(\"img\", {\"class\": \"listing-card__image__resource\"}).get(\"src\")\n desc = item.find(\"p\",{\"class\":\"listing-card__description\"}).text\n try:\n prix = int(item.find(\"span\", {\"class\": \"listing-card__price__value\"}).text.strip().replace(u'KSh','').replace(u',', ''))\n except:\n prix=0\n\n produits.append(\n {\n 'id': '',\n 'libProduct': lib,\n 'slug': '',\n 'descProduct': desc,\n 'priceProduct': prix,\n 'imgProduct': img,\n 'numSeller': '',\n 'src': site,\n 'urlProduct': url,\n 'logo': logo,\n 'logoS':logoS,\n 'origin': origin,\n })\n\n except:\n continue\n\n return produits\n\n#print(scrapPigiame(origin=1))\n\n\"\"\"PRODUCT INSERTION\"\"\"\n\nproduits = scrapPigiame(origin=1)\ninsertProduct(user='root', passW='', host='localhost', dbname='kenya', produits=produits)","repo_name":"sysall/WebScrapping","sub_path":"Sites/Kenya/3_Pigiame.py","file_name":"3_Pigiame.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"7304826298","text":"import sys\nfrom collections import deque\n\nI = sys.stdin.readline\ninitial = []\ndx = [-1,-1,0,1,1,1,0,-1]\ndy = [0,-1,-1,-1,0,1,1,1]\n\nfor _ in range(4):\n tmp = list(map(int,I().split()))\n initial.append([tmp[:2],tmp[2:4],tmp[4:6],tmp[6:]])\n\n\n\ndef moveFish(state):\n newState = [[]for _ in range(4)]\n for i in range(4):\n for j in range(4):\n newState[i].append(state[i][j][::])\n for n in range(1,17):\n check = 0\n for i in range(4):\n for j in range(4):\n if newState[i][j][0] == n:\n while True:\n nx = i+dx[newState[i][j][1]-1]\n ny = j+dy[newState[i][j][1]-1]\n\n if nx < 0 or nx > 3 or ny < 0 or ny > 3:\n newState[i][j][1] += 1\n if newState[i][j][1] > 8:\n newState[i][j][1] = 1\n continue\n\n if newState[nx][ny][0] > 0:\n tmp = newState[nx][ny]\n newState[nx][ny] = newState[i][j]\n newState[i][j] = tmp\n break\n\n else:\n newState[i][j][1] += 1\n if newState[i][j][1] > 8:\n newState[i][j][1] = 1\n\n check = 1\n break\n\n if check == 1:\n break\n\n return newState\n\n\n\nq = deque()\nstate = initial[::]\nq.append((0,0,state,state[0][0][0]))\n# shark = fish number 0\nstate[0][0][0] = 0\nresult = 0\nwhile q:\n x,y,state,score = q.popleft()\n state = moveFish(state)\n possible = []\n shark = state[x][y]\n nx = x\n ny = y\n while True:\n nx += dx[shark[1]-1]\n ny += dy[shark[1]-1]\n\n if nx < 0 or nx > 3 or ny < 0 or ny > 3:\n break\n\n if state[nx][ny][0] > 16:\n continue\n\n possible.append((nx,ny))\n\n for tx,ty in possible:\n newState = [[]for _ in range(4)]\n for i in range(4):\n for j in range(4):\n newState[i].append(state[i][j][::])\n newState[x][y] = [17,0]\n tmp = newState[tx][ty][0]\n newState[tx][ty][0] = 0\n result = max(result,score+tmp)\n 
q.append((tx,ty,newState,score+tmp))\n\nprint(result)","repo_name":"dlwhd990/BOJ-2022","sub_path":"BOJ/[19236]청소년상어.py","file_name":"[19236]청소년상어.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18355591175","text":"from typing import Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..models.audit_log import AuditLog\nfrom ..types import UNSET, Unset\n\nT = TypeVar(\"T\", bound=\"GetAuditLogsResponse\")\n\n\n@attr.s(auto_attribs=True)\nclass GetAuditLogsResponse:\n \"\"\"GetAuditLogs response.\n\n Attributes:\n logs (Union[Unset, List[AuditLog]]): List of audit log events.\n \"\"\"\n\n logs: Union[Unset, List[AuditLog]] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n logs: Union[Unset, List[Dict[str, Any]]] = UNSET\n if not isinstance(self.logs, Unset):\n logs = []\n for logs_item_data in self.logs:\n logs_item = logs_item_data.to_dict()\n\n logs.append(logs_item)\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if logs is not UNSET:\n field_dict[\"logs\"] = logs\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n logs = []\n _logs = d.pop(\"logs\", UNSET)\n for logs_item_data in _logs or []:\n logs_item = AuditLog.from_dict(logs_item_data)\n\n logs.append(logs_item)\n\n get_audit_logs_response = cls(\n logs=logs,\n )\n\n get_audit_logs_response.additional_properties = d\n return get_audit_logs_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"yolabingo/docker-hub-api-client","sub_path":"docker_hub_api_client/models/get_audit_logs_response.py","file_name":"get_audit_logs_response.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35160104479","text":"import dealWithDBResults\nimport hitrate\nimport os\nimport gc\nwith open('results/actionDetectionFolders.txt','rb') as folders:\n\tconnType = \"sqlite\"\n\tconn = dealWithDBResults.getDB(connType)\n\tdealWithDBResults.withoutoverlaps = True\n\tdealWithDBResults.suppress_output = True\n\tdealWithDBResults.debugQuery = False\n\tdealWithDBResults.connType = connType\n\thitrate.args = lambda:None\n\thitrate.args.__dict__.update({\"normalizefirst\": True, \"debug\":False, \"scan\":False, \"summary\":True, \"smart\":False, \"latex\":False, \"examples_only\":False, \"failures\":False})\n\ttry:\n\t\tfor folder in (x.rstrip() for x in folders if not x.startswith(\"#\")):\n\t\t\tgc.collect()\n\t\t\tfolderPath = os.path.join(\"results\",folder)\n\t\t\texamples = dealWithDBResults.getExamples(folderPath,mode=\"list\")\n\t\t\tos.system(\"python dealWithDBResults.py -a {} upanddown\".format(folderPath))\n\t\t\t#dealWithDBResults.processAndUploadExamples(folderPath,examples,conn)\n\t\t\t#dealWithDBResults.downloadExamples(examples,connType,conn)\n\t\t\thitrate.exceptions = list()\n\t\t\tsummary = 
hitrate.doit()\n\t\t\twith open('results/{}.csv'.format(folder),'wb') as results:\n\t\t\t\tfor key in summary.keys():\n\t\t\t\t\tresults.write(\"{},{}\\n\".format(key,summary[key]))\n\t\t\t\tresults.write(\"diff,{}\\n\".format(summary['causalgrammar']-summary['origdata']))\n\tfinally:\n\t\tconn.close()\n","repo_name":"vcla/Causality","sub_path":"scanFolders.py","file_name":"scanFolders.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"20365271260","text":"import datetime\n\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\n# region Demo1\n# https://github.com/j32u4ukh/Tensorflow-101/blob/master/notebooks/13_Generative_Adversarial_Network.ipynb\n\n# %%\ndef weight_variable(shape, name):\n # 截尾常態分配\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial, name)\n\n\ndef bias_variable(shape, name):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name)\n\n\nmnist = input_data.read_data_sets(\"./data/MNIST_data/\", one_hot=True)\n\n# Parameters\nbatch_size = 256\ng_dim = 128\n\nx_d = tf.placeholder(tf.float32, shape=[None, 784])\nx_g = tf.placeholder(tf.float32, shape=[None, 128])\n\nweights = {\n \"w_d1\": weight_variable([784, 128], \"w_d1\"),\n \"w_d2\": weight_variable([128, 1], \"w_d2\"),\n \"w_g1\": weight_variable([128, 256], \"w_g1\"),\n \"w_g2\": weight_variable([256, 784], \"w_g2\")\n}\n\nbiases = {\n \"b_d1\": bias_variable([128], \"b_d1\"),\n \"b_d2\": bias_variable([1], \"b_d2\"),\n \"b_g1\": bias_variable([256], \"b_g1\"),\n \"b_g2\": bias_variable([784], \"b_g2\"),\n}\n\nvar_d = [weights[\"w_d1\"], weights[\"w_d2\"], biases[\"b_d1\"], biases[\"b_d2\"]]\nvar_g = [weights[\"w_g1\"], weights[\"w_g2\"], biases[\"b_g1\"], biases[\"b_g2\"]]\n\n\n# Build generator and discriminator networks\n# 一個 GAN 裡包含了兩個神經網路,一個是 generator,\n# 另一個是 discriminator.首先照著定義個別建立網路.\n#\n# 生成式網路 generator(z) 會接受一個 128 維的輸入 z 並經過兩層網路以後產生一個 784 維的輸出,這就是所謂假的 MNIST 資料.\n#\n# 判斷式網路 discrimination(x) 接受一個 784 維的輸入 x 並經過兩層網路以後產生一個 1 維的輸出,\n# 而輸出經過 sigmoid 之後是一個 0 ~ 1 的數,代表著判斷式網路此輸出 x 是真實資料的機率.\ndef generator(z):\n h_g1 = tf.nn.relu(tf.add(tf.matmul(z, weights[\"w_g1\"]), biases[\"b_g1\"]))\n h_g2 = tf.nn.sigmoid(tf.add(tf.matmul(h_g1, weights[\"w_g2\"]), biases[\"b_g2\"]))\n return h_g2\n\n\ndef discriminator(x):\n h_d1 = tf.nn.relu(tf.add(tf.matmul(x, weights[\"w_d1\"]), biases[\"b_d1\"]))\n h_d2 = tf.nn.sigmoid(tf.add(tf.matmul(h_d1, weights[\"w_d2\"]), biases[\"b_d2\"]))\n return h_d2\n\n\n# Build cost functions\n# 在定義訓練 cost 函數的時候,和之前的神經網路比較不一樣的地方是它沒有一個標準的 loss 計算.\n# 畢竟這個模型要判斷的問題是真或假的問題.\ndef sample_Z(m, n):\n return np.random.uniform(-1., 1., size=[m, n])\n\n\n# g_sample:生成器的 output\ng_sample = generator(x_g)\n\n# d_real:判別器 判斷 真實數據 的 output\nd_real = discriminator(x_d)\n\n# d_fake:判別器 判斷 生成數據 的 output\nd_fake = discriminator(g_sample)\n\nd_loss = -tf.reduce_mean(tf.log(d_real) + tf.log(1. 
- d_fake))\ng_loss = -tf.reduce_mean(tf.log(d_fake))\n\n# Training\n# 接下來使用 AdamOptimizer 來做訓練,其中提到了要先對 discriminator 更新參數再對 geneartor 更新.\n# 因此需要在其中指定更新參數 var_list.\n# 只更新 discriminator\nd_optimizer = tf.train.AdamOptimizer(0.0005).minimize(d_loss, var_list=var_d)\n# 只更新 generator parameters\ng_optimizer = tf.train.AdamOptimizer(0.0001).minimize(g_loss, var_list=var_g)\n\n\ndef plot(samples):\n # fig = plt.figure(figsize=(4, 4))\n plt.figure(figsize=(4, 4))\n gs = gridspec.GridSpec(4, 4)\n gs.update(wspace=0.05, hspace=0.05)\n for i, sample in enumerate(samples):\n ax = plt.subplot(gs[i])\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n plt.imshow(sample.reshape(28, 28), cmap='gray')\n\n plt.show()\n\n\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\nfor step in range(20001):\n batch_x = mnist.train.next_batch(batch_size)[0]\n _, d_loss_train = sess.run([d_optimizer, d_loss], feed_dict={x_d: batch_x, x_g: sample_Z(batch_size, g_dim)})\n _, g_loss_train = sess.run([g_optimizer, g_loss], feed_dict={x_g: sample_Z(batch_size, g_dim)})\n\n if step <= 1000:\n if step % 100 == 0:\n print(\"step %d, discriminator loss %.5f\" % (step, d_loss_train)),\n print(\" generator loss %.5f\" % g_loss_train)\n if step % 1000 == 0:\n g_sample_plot = g_sample.eval(feed_dict={x_g: sample_Z(16, g_dim)})\n plot(g_sample_plot)\n else:\n if step % 1000 == 0:\n print(\"step %d, discriminator loss %.5f\" % (step, d_loss_train)),\n print(\" generator loss %.5f\" % g_loss_train)\n if step % 2000 == 0:\n g_sample_plot = g_sample.eval(feed_dict={x_g: sample_Z(16, g_dim)})\n plot(g_sample_plot)\nsess.close()\n# endregion\n\n\n# region Demo2:DCGAN\n# https://github.com/c1mone/Tensorflow-101/blob/master/notebooks/14_DCGAN_with_MNIST.ipynb\n# 跟 GAN 不同的是,DCGAN 把 convolution 引進網路結構中.\n# 在 discriminator 中輸入的圖像會經過層層 convolution 之後變成一個預測是否為真實圖片的機率;\n# 在 generator 中會把輸入的 z 向量經過層層 deconvolution 輸出生成式的圖片.可以看到它的結構如下圖.\ndef weight_variable(shape, name):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial, name)\n\n\ndef bias_variable(shape, name):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name)\n\n\nbatch_size = 256\ng_dim = 100\n\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef deconv2d(x, W, output_shape):\n return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, 2, 2, 1], padding='SAME')\n\n\n# Build convolutional discriminator and generator\n# 這裡我們會建立生成式網路 (generator) 以及判斷式網路 (discriminator)\n\n# Discriminator\n# 在 discriminator 中,首先會輸入一個 None x 784 維的 MNIST 資料,首先會先把它變成一個二維的圖片向量也就是\n# None x 28(width) x 28(height) x 1(channels).接下來就是連續的使用 filter 作 convolution,其中 strides = 2,\n# 使得每做完一次 convolution 的輸出長和寬就會變成一半.以下是輸入圖片的維度變化順序:\n# 原始輸入維度 None x 28 x 28 x 1\n# 經過第一個 5 x 5 convolution filter 後輸出維度為 None x 14 x 14 x 32\n# 經過第二個 5 x 5 convolution filter 後輸出維度為 None x 7 x 7 x 64\n# 經過一個 fully connected 層變成維度 None x 1 的判斷機率輸出\n\n# Generator\n# 在 generator 中,首先會輸入一個 None x 128 維從 noise 中取樣出的向量,首先經過一個全連結會把它擴展成\n# None x 4*4*64,再把它 reshape 成一個 None x 4(width) x 4(height) x 64(channels) 的輸入\n# (可以想像成把輸入向量變成一個 4x4 的圖像).接下來就是經過一連串的 deconvolution,最後輸出圖片.以下是輸入的維度變化順序:\n\n# 原始輸入取樣向量維度 None x 100\n# 經過第一個 fully connected 層以及 reshape 變成維度 None x 4 x 4 x 64\n# 經過第一個 5 x 5 deconvolution filter 後輸出維度為 None x 7 x 7 x 32\n# 經過第二個 5 x 5 deconvolution filter 後輸出維度為 None x 14 x 14 x 16\n# 經過第三個 5 x 5 deconvolution filter 後輸出維度為 None x 28 x 28 x 1\nx_d = 
tf.placeholder(tf.float32, shape=[None, 784])\nx_g = tf.placeholder(tf.float32, shape=[None, g_dim])\nweights = {\n \"w_d1\": weight_variable([5, 5, 1, 32], \"w_d1\"),\n \"w_d2\": weight_variable([5, 5, 32, 64], \"w_d2\"),\n \"w_d3\": weight_variable([7 * 7 * 64, 1], \"w_d3\"),\n\n \"w_g1\": weight_variable([g_dim, 4 * 4 * 64], \"w_g1\"),\n \"w_g2\": weight_variable([5, 5, 32, 64], \"w_g2\"),\n \"w_g3\": weight_variable([5, 5, 16, 32], \"w_g3\"),\n \"w_g4\": weight_variable([5, 5, 1, 16], \"w_g4\")\n}\n\nbiases = {\n \"b_d1\": bias_variable([32], \"b_d1\"),\n \"b_d2\": bias_variable([64], \"b_d2\"),\n \"b_d3\": bias_variable([1], \"b_d3\"),\n \"b_g1\": bias_variable([4 * 4 * 64], \"b_g1\"),\n \"b_g2\": bias_variable([32], \"b_g2\"),\n \"b_g3\": bias_variable([16], \"b_g3\"),\n \"b_g4\": bias_variable([1], \"b_g4\"),\n}\n\nvar_d = [weights[\"w_d1\"],\n weights[\"w_d2\"],\n weights[\"w_d3\"],\n biases[\"b_d1\"],\n biases[\"b_d2\"],\n biases[\"b_d3\"]]\n\nvar_g = [weights[\"w_g1\"],\n weights[\"w_g2\"],\n weights[\"w_g3\"],\n weights[\"w_g4\"],\n biases[\"b_g1\"],\n biases[\"b_g2\"],\n biases[\"b_g3\"],\n biases[\"b_g4\"]]\n\n\ndef generator(z):\n # 100 x 1\n h_g1 = tf.nn.relu(tf.add(tf.matmul(z, weights[\"w_g1\"]), biases[\"b_g1\"]))\n # -1 x 4*4*128\n h_g1_reshape = tf.reshape(h_g1, [-1, 4, 4, 64])\n\n output_shape_g2 = tf.stack([tf.shape(z)[0], 7, 7, 32])\n h_g2 = tf.nn.relu(tf.add(deconv2d(h_g1_reshape, weights[\"w_g2\"], output_shape_g2), biases[\"b_g2\"]))\n\n output_shape_g3 = tf.stack([tf.shape(z)[0], 14, 14, 16])\n h_g3 = tf.nn.relu(tf.add(deconv2d(h_g2, weights[\"w_g3\"], output_shape_g3), biases[\"b_g3\"]))\n\n output_shape_g4 = tf.stack([tf.shape(z)[0], 28, 28, 1])\n h_g4 = tf.nn.tanh(tf.add(deconv2d(h_g3, weights[\"w_g4\"], output_shape_g4), biases[\"b_g4\"]))\n\n return h_g4\n\n\ndef discriminator(x):\n x_reshape = tf.reshape(x, [-1, 28, 28, 1])\n # 28 x 28 x 1\n h_d1 = tf.nn.relu(tf.add(conv2d(x_reshape, weights[\"w_d1\"]), biases[\"b_d1\"]))\n # 14 x 14 x 32\n h_d2 = tf.nn.relu(tf.add(conv2d(h_d1, weights[\"w_d2\"]), biases[\"b_d2\"]))\n # 7 x 7 x 64\n h_d2_reshape = tf.reshape(h_d2, [-1, 7 * 7 * 64])\n h_d3 = tf.nn.sigmoid(tf.add(tf.matmul(h_d2_reshape, weights[\"w_d3\"]), biases[\"b_d3\"]))\n return h_d3\n\n\n# Build cost function\ndef sample_Z(m, n):\n return np.random.uniform(-1., 1., size=[m, n])\n\n\ng_sample = generator(x_g)\nd_real = discriminator(x_d)\nd_fake = discriminator(g_sample)\n\nd_loss = -tf.reduce_mean(tf.log(d_real) + tf.log(1. 
- d_fake))\ng_loss = -tf.reduce_mean(tf.log(d_fake))\n\n# 只更新 discriminator\nd_optimizer = tf.train.AdamOptimizer(0.0001).minimize(d_loss, var_list=var_d)\n\n# 只更新 generator parameters\ng_optimizer = tf.train.AdamOptimizer(0.0001).minimize(g_loss, var_list=var_g)\n\n\n# Training\ndef plot(samples):\n # fig = plt.figure(figsize=(4, 4))\n plt.figure(figsize=(4, 4))\n gs = gridspec.GridSpec(4, 4)\n gs.update(wspace=0.05, hspace=0.05)\n for i, sample in enumerate(samples):\n ax = plt.subplot(gs[i])\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n plt.imshow(sample.reshape(28, 28), cmap='gray')\n\n plt.show()\n\n\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\nfor step in range(1, 20001):\n batch_x = mnist.train.next_batch(batch_size)[0]\n _, d_loss_train = sess.run([d_optimizer, d_loss], feed_dict={x_d: batch_x, x_g: sample_Z(batch_size, g_dim)})\n _, g_loss_train = sess.run([g_optimizer, g_loss], feed_dict={x_g: sample_Z(batch_size, g_dim)})\n\n if step <= 1000:\n if step % 100 == 0:\n print(\"step %d, discriminator loss %.5f\" % (step, d_loss_train)),\n print(\" generator loss %.5f\" % g_loss_train)\n if step % 1000 == 0:\n g_sample_plot = g_sample.eval(feed_dict={x_g: sample_Z(16, g_dim)})\n plot(g_sample_plot)\n else:\n if step % 1000 == 0:\n print(\"step %d, discriminator loss %.5f\" % (step, d_loss_train)),\n print(\" generator loss %.5f\" % g_loss_train)\n if step % 2000 == 0:\n g_sample_plot = g_sample.eval(feed_dict={x_g: sample_Z(16, g_dim)})\n plot(g_sample_plot)\n# endregion\n\n\n# region Demo3\n# https://medium.com/@gau820827/%E6%95%99%E9%9B%BB%E8%85%A6%E7%95%AB%E7%95%AB-\n# %E5%88%9D%E5%BF%83%E8%80%85%E7%9A%84%E7%94%9F%E6%88%90%E5%BC%8F%E5%B0%8D%E6%8A%97%E7%B6%B2%E8%B7%AF-\n# gan-%E5%85%A5%E9%96%80%E7%AD%86%E8%A8%98-tensorflow-python3-dfad71662952\nmnist = input_data.read_data_sets(\"data/MNIST_data/\")\nsample_image = mnist.train.next_batch(1)[0]\nprint(sample_image.shape)\n\nsample_image = sample_image.reshape([28, 28])\nplt.imshow(sample_image, cmap='Greys')\n\n\ndef discriminator(images, reuse_variables=None):\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):\n # First convolutional and pool layers\n # This finds 32 different 5 x 5 pixel features\n d_w1 = tf.get_variable('d_w1', [5, 5, 1, 32], initializer=tf.truncated_normal_initializer(stddev=0.02))\n d_b1 = tf.get_variable('d_b1', [32], initializer=tf.constant_initializer(0))\n d1 = tf.nn.conv2d(input=images, filter=d_w1, strides=[1, 1, 1, 1], padding='SAME')\n d1 = d1 + d_b1\n d1 = tf.nn.relu(d1)\n d1 = tf.nn.avg_pool(d1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Second convolutional and pool layers\n # This finds 64 different 5 x 5 pixel features\n d_w2 = tf.get_variable('d_w2', [5, 5, 32, 64], initializer=tf.truncated_normal_initializer(stddev=0.02))\n d_b2 = tf.get_variable('d_b2', [64], initializer=tf.constant_initializer(0))\n d2 = tf.nn.conv2d(input=d1, filter=d_w2, strides=[1, 1, 1, 1], padding='SAME')\n d2 = d2 + d_b2\n d2 = tf.nn.relu(d2)\n d2 = tf.nn.avg_pool(d2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # First fully connected layer\n d_w3 = tf.get_variable('d_w3', [7 * 7 * 64, 1024], initializer=tf.truncated_normal_initializer(stddev=0.02))\n d_b3 = tf.get_variable('d_b3', [1024], initializer=tf.constant_initializer(0))\n d3 = tf.reshape(d2, [-1, 7 * 7 * 64])\n d3 = tf.matmul(d3, d_w3)\n d3 = d3 + d_b3\n d3 = tf.nn.relu(d3)\n\n # Second fully connected layer\n d_w4 = 
tf.get_variable('d_w4', [1024, 1], initializer=tf.truncated_normal_initializer(stddev=0.02))\n d_b4 = tf.get_variable('d_b4', [1], initializer=tf.constant_initializer(0))\n d4 = tf.matmul(d3, d_w4) + d_b4\n\n # d4 contains unscaled values\n return d4\n\n\ndef generator(z, batch_size, z_dim):\n g_w1 = tf.get_variable('g_w1', [z_dim, 3136], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n g_b1 = tf.get_variable('g_b1', [3136], initializer=tf.truncated_normal_initializer(stddev=0.02))\n g1 = tf.matmul(z, g_w1) + g_b1\n g1 = tf.reshape(g1, [-1, 56, 56, 1])\n g1 = tf.contrib.layers.batch_norm(g1, epsilon=1e-5, scope='g_b1')\n g1 = tf.nn.relu(g1)\n\n # Generate 50 features\n g_w2 = tf.get_variable('g_w2', [3, 3, 1, z_dim / 2], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n g_b2 = tf.get_variable('g_b2', [z_dim / 2], initializer=tf.truncated_normal_initializer(stddev=0.02))\n g2 = tf.nn.conv2d(g1, g_w2, strides=[1, 2, 2, 1], padding='SAME')\n g2 = g2 + g_b2\n g2 = tf.contrib.layers.batch_norm(g2, epsilon=1e-5, scope='g_b2')\n g2 = tf.nn.relu(g2)\n g2 = tf.image.resize_images(g2, [56, 56])\n\n # Generate 25 features\n g_w3 = tf.get_variable('g_w3', [3, 3, z_dim / 2, z_dim / 4], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n g_b3 = tf.get_variable('g_b3', [z_dim / 4], initializer=tf.truncated_normal_initializer(stddev=0.02))\n g3 = tf.nn.conv2d(g2, g_w3, strides=[1, 2, 2, 1], padding='SAME')\n g3 = g3 + g_b3\n g3 = tf.contrib.layers.batch_norm(g3, epsilon=1e-5, scope='g_b3')\n g3 = tf.nn.relu(g3)\n g3 = tf.image.resize_images(g3, [56, 56])\n\n # Final convolution with one output channel\n g_w4 = tf.get_variable('g_w4', [1, 1, z_dim / 4, 1], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n g_b4 = tf.get_variable('g_b4', [1], initializer=tf.truncated_normal_initializer(stddev=0.02))\n g4 = tf.nn.conv2d(g3, g_w4, strides=[1, 2, 2, 1], padding='SAME')\n g4 = g4 + g_b4\n g4 = tf.sigmoid(g4)\n\n # Dimensions of g4: batch_size x 28 x 28 x 1\n return g4\n\n\nz_dimensions = 100\nz_placeholder = tf.placeholder(tf.float32, [None, z_dimensions])\n\ngenerated_image_output = generator(z_placeholder, 1, z_dimensions)\nz_batch = np.random.normal(0, 1, [1, z_dimensions])\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n generated_image = sess.run(generated_image_output,\n feed_dict={z_placeholder: z_batch})\n generated_image = generated_image.reshape([28, 28])\n plt.imshow(generated_image, cmap='Greys')\n\ntf.reset_default_graph()\nbatch_size = 50\n\n# z_placeholder is for feeding input noise to the generator\nz_placeholder = tf.placeholder(tf.float32, [None, z_dimensions], name='z_placeholder')\n\n# x_placeholder is for feeding input images to the discriminator\nx_placeholder = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='x_placeholder')\n\n# Gz holds the generated images\nGz = generator(z_placeholder, batch_size, z_dimensions)\n\n# Dx will hold discriminator prediction probabilities for the real MNIST images\nDx = discriminator(x_placeholder)\n\n# Dg will hold discriminator prediction probabilities for generated images\nDg = discriminator(Gz, reuse_variables=True)\n\nd_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dx, labels=tf.ones_like(Dx)))\nd_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels=tf.zeros_like(Dg)))\n\ng_loss = 
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels=tf.ones_like(Dg)))\n\ntvars = tf.trainable_variables()\n\nd_vars = [var for var in tvars if 'd_' in var.name]\ng_vars = [var for var in tvars if 'g_' in var.name]\n\nprint([v.name for v in d_vars])\nprint([v.name for v in g_vars])\n\n# Train the discriminator\nd_trainer_fake = tf.train.AdamOptimizer(0.0003).minimize(d_loss_fake, var_list=d_vars)\nd_trainer_real = tf.train.AdamOptimizer(0.0003).minimize(d_loss_real, var_list=d_vars)\n\n# Train the generator\ng_trainer = tf.train.AdamOptimizer(0.0001).minimize(g_loss, var_list=g_vars)\n\n# From this point forward, reuse variables\ntf.get_variable_scope().reuse_variables()\n\ntf.summary.scalar('Generator_loss', g_loss)\ntf.summary.scalar('Discriminator_loss_real', d_loss_real)\ntf.summary.scalar('Discriminator_loss_fake', d_loss_fake)\n\nimages_for_tensorboard = generator(z_placeholder, batch_size, z_dimensions)\ntf.summary.image('Generated_images', images_for_tensorboard, 5)\nmerged = tf.summary.merge_all()\nlogdir = \"tensorboard/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\") + \"/\"\nwriter = tf.summary.FileWriter(logdir, sess.graph)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# Pre-train discriminator\nfor i in range(300):\n z_batch = np.random.normal(0, 1, size=[batch_size, z_dimensions])\n real_image_batch = mnist.train.next_batch(batch_size)[0].reshape([batch_size, 28, 28, 1])\n _, __, dLossReal, dLossFake = sess.run([d_trainer_real, d_trainer_fake, d_loss_real, d_loss_fake],\n {x_placeholder: real_image_batch, z_placeholder: z_batch})\n\n if i % 100 == 0:\n print(\"dLossReal:\", dLossReal, \"dLossFake:\", dLossFake)\n\n# Train generator and discriminator together\nfor i in range(100000):\n real_image_batch = mnist.train.next_batch(batch_size)[0].reshape([batch_size, 28, 28, 1])\n z_batch = np.random.normal(0, 1, size=[batch_size, z_dimensions])\n\n # Train discriminator on both real and fake images\n _, __, dLossReal, dLossFake = sess.run([d_trainer_real, d_trainer_fake, d_loss_real, d_loss_fake],\n {x_placeholder: real_image_batch, z_placeholder: z_batch})\n\n # Train generator\n z_batch = np.random.normal(0, 1, size=[batch_size, z_dimensions])\n _ = sess.run(g_trainer, feed_dict={z_placeholder: z_batch})\n\n if i % 10 == 0:\n # Update TensorBoard with summary statistics\n z_batch = np.random.normal(0, 1, size=[batch_size, z_dimensions])\n summary = sess.run(merged, {z_placeholder: z_batch, x_placeholder: real_image_batch})\n writer.add_summary(summary, i)\n\n if i % 100 == 0:\n # Every 100 iterations, show a generated image\n print(\"Iteration:\", i, \"at\", datetime.datetime.now())\n z_batch = np.random.normal(0, 1, size=[1, z_dimensions])\n generated_images = generator(z_placeholder, 1, z_dimensions)\n images = sess.run(generated_images, {z_placeholder: z_batch})\n plt.imshow(images[0].reshape([28, 28]), cmap='Greys')\n plt.show()\n\n # Show discriminator's estimate\n im = images[0].reshape([1, 28, 28, 1])\n result = discriminator(x_placeholder)\n estimate = sess.run(result, {x_placeholder: im})\n print(\"Estimate:\", estimate)\n# endregion\n","repo_name":"j32u4ukh/GanLearning","sub_path":"GAN_Learning.py","file_name":"GAN_Learning.py","file_ext":"py","file_size_in_byte":20728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17803180678","text":"\"\"\"\nThis script takes a GFF3 file and an annotation file,\nproducing a BED file for annotation of relevant 
transcripts.\nExample cmd (TODO: add example cmd once script is finalized):\n/home/rswilson1/anaconda3/envs/Annotation_app/bin/python\n/home/rswilson1/Documents/Programming_project/gene_annotation2bed.py\n-gff \"GCF_000001405.25_GRCh37.p13_genomic.gff\"\n-ig cancerGeneList_test.maf -ref \"hg19\" -f 5\n--assembly_summary \"GCF_000001405.25_GRCh37.p13_assembly_report.txt\"\n-o \"test6\"\n\"\"\"\n\nimport argparse\n\nimport argcomplete\nimport igv_report as igv\nimport numpy as np\nimport pandas as pd\nimport re\n\nimport gff2pandas as gffpd\n\npd.options.mode.chained_assignment = None # default='warn'\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"\n Parse command line arguments\n\n Parameters\n ----------\n None\n\n Returns\n -------\n args : Namespace\n Namespace of passed command line argument inputs\n \"\"\"\n parser = argparse.ArgumentParser(description=\"GFF Processing Script\")\n group1 = parser.add_mutually_exclusive_group(required=True)\n group1.add_argument(\"-gff\", \"--gff_file\", help=\"Path to GFF file\")\n group1.add_argument(\"-pkl\", \"--pickle\", help=\"Import gff as pickle file\")\n\n group2 = parser.add_mutually_exclusive_group(required=True)\n group2.add_argument(\n \"-ig\", \"--annotation_file\", help=\"Path to the annotation file (TSV)\"\n )\n group2.add_argument(\n \"-it\", \"--transcript_file\", help=\"Path to transcript annotation file\"\n )\n\n parser.add_argument(\n \"-o\", \"--output_file_suffix\", help=\"Output file suffix\", required=True\n )\n parser.add_argument(\n \"-ref\", \"--reference_genome\", help=\"Reference genome (hg19/hg38)\",\n required=True, choices=('hg19', 'hg38')\n )\n parser.add_argument(\n \"-fasta\",\n \"--reference_file\",\n help=\"Path to Reference genome fasta file for igv_reports\",\n )\n parser.add_argument(\n \"-f\", \"--flanking\", type=int, help=\"Flanking size\", required=True\n )\n parser.add_argument(\n \"--assembly_summary\", help=\"Path to assembly summary file\", required=True\n )\n # parser.add_argument('--report_name', help=\"Name for report\")\n argcomplete.autocomplete(parser)\n args = parser.parse_args()\n\n return args\n\n\ndef parse_gff(gff_file):\n \"\"\"\n Import GFF3 file and convert to pandas DataFrame.\n\n The GFF3 file is imported into a dataframe and then all the attributes\n in the attributes column are split into seperate columns.\n It then drops many of the additional fields from the attributes column\n which are not needed to reduce memory footprint.\n The dataframe is then filtered to only include entries which have the\n 'transcript_id' start with 'NM_'.\n\n Parameters\n ----------\n gff_file : gff2pandas object\n GFF object which contains the df and header.\n\n Returns\n -------\n transcripts_df : pandas DataFrame\n DataFrame containing the all the 'NM_' prefixed\n transcripts from the GFF3 file.\n\n Transformation from initial dataframe (gff df) to final dataframe:\n +--------------+------------+------------+-------+-----------+-------+\n | seq_id | source | type | start | end | score |\n +--------------+------------+------------+-------+-----------+-------+\n | NC_000001.10 | RefSeq | region | 1 | 249250621 | . |\n | NC_000001.10 | BestRefSeq | pseudogene | 11874 | 14409 | . |\n | NC_000001.10 | BestRefSeq | transcript | 11874 | 14409 | . |\n +--------------+------------+------------+-------+-----------+-------+\n +--------+-------+---------------------------------------------------+\n | strand | phase | attributes |\n +--------+-------+---------------------------------------------------+\n | + | . 
| attributes string... |\n    | + | . | attributes string... |\n    | + | . | attributes string... |\n    +--------+-------+---------------------------------------------------+\n\n                                 |\n                                 |\n                                 |\n                                 V\n\n    Transcripts dataframe:\n    +--------------+------------+------+-------+-------+-------+--------+-------+\n    | seq_id | source | type | start | end | score | strand | phase |\n    +--------------+------------+------+-------+-------+-------+--------+-------+\n    | NC_000001.10 | BestRefSeq | mRNA | 65419 | 71585 | . | + | . |\n    | NC_000001.10 | BestRefSeq | exon | 65419 | 65433 | . | + | . |\n    | NC_000001.10 | BestRefSeq | exon | 65520 | 65573 | . | + | . |\n    +--------------+------------+------+-------+-------+-------+--------+-------+\n    +-----------------------------------------------------+----------------------+\n    | Dbxref | ID |\n    +-----------------------------------------------------+----------------------+\n    | GeneID:79501,Genbank:NM_001005484.2,HGNC:HGNC:14825 | rna-NM_001005484.2 |\n    | GeneID:79501,Genbank:NM_001005484.2,HGNC:HGNC:14825 | exon-NM_001005484.2-1|\n    | GeneID:79501,Genbank:NM_001005484.2,HGNC:HGNC:14825 | exon-NM_001005484.2-2|\n    +-----------------------------------------------------+----------------------+\n    +---------------------------------------------------+\n    | attributes |\n    +---------------------------------------------------+\n    | attributes string... |\n    | attributes string... |\n    | attributes string... |\n    +---------------------------------------------------+\n    +-------+-------+----------------+---------+\n    | gbkey | gene | transcript_id | hgnc_id |\n    +-------+-------+----------------+---------+\n    | mRNA | OR4F5 | NM_001005484.2 | 14825 |\n    | mRNA | OR4F5 | NM_001005484.2 | 14825 |\n    | mRNA | OR4F5 | NM_001005484.2 | 14825 |\n    +-------+-------+----------------+---------+\n\n    Produced using https://ozh.github.io/ascii-tables/.\n    \"\"\"\n    transcripts_gff = gffpd.read_gff3(gff_file)\n    gff_df = transcripts_gff.attributes_to_columns()\n    # drop columns that are not needed to reduce memory footprint\n    gff_df = gff_df.drop(\n        [\n            \"Gap\", \"Is_circular\", \"Name\", \"Note\", \"Parent\", \"Target\", \"anticodon\",\n            \"assembly_bases_aln\", \"assembly_bases_seq\", \"bit_score\", \"blast_aligner\",\n            \"blast_score\", \"bound_moiety\", \"chromosome\", \"codons\", \"common_component\",\n            \"consensus_splices\", \"country\", \"description\", \"direction\", \"e_value\",\n            \"end_range\", \"exception\", \"exon_identity\", \"exon_number\", \"experiment\",\n            \"feat_class\", \"filter_score\", \"for_remapping\", \"function\", \"gap_count\",\n            \"gene_biotype\", \"gene_synonym\", \"genome\", \"hsp_percent_coverage\",\n            \"identity\", \"idty\", \"inference\", \"inversion_merge_aligner\",\n            \"isolation-source\", \"lxr_locAcc_currStat_120\", \"lxr_locAcc_currStat_35\",\n            \"map\", \"matchable_bases\", \"matched_bases\", \"matches\", \"merge_aligner\",\n            \"mobile_element_type\", \"mol_type\", \"not_for_annotation\", \"note\",\n            \"num_ident\", \"num_mismatch\", \"number\", \"partial\", \"pct_coverage\",\n            \"pct_coverage_hiqual\", \"pct_identity_gap\", \"pct_identity_gapopen_only\",\n            \"pct_identity_ungap\", \"product\", \"product_coverage\", \"protein_id\",\n            \"pseudo\", \"rank\", \"recombination_class\", \"regulatory_class\",\n            \"rpt_family\", \"rpt_type\", \"rpt_unit_range\", \"rpt_unit_seq\",\n            \"satellite\", \"splices\", \"standard_name\", \"start_range\", \"tag\",\n            \"tissue-type\", \"transl_except\", \"transl_table\", \"weighted_identity\",\n        ],\n        axis=1,\n    )\n\n    # Apply extract_hgnc_id function to create 
'hgnc_id' column\n gff_df[\"hgnc_id\"] = gff_df[\"Dbxref\"].apply(extract_hgnc_id)\n\n # set dtype for each column to reduce memory footprint\n dtype_mapping = {\n \"ID\": \"category\",\n \"transcript_id\": \"str\",\n \"hgnc_id\": \"Int64\",\n }\n\n gff_df = gff_df.astype(dtype_mapping)\n\n # Filter GFF DataFrame to select entries with 'NM' type\n transcripts_df = gff_df[gff_df[\"transcript_id\"].str.startswith(\"NM_\")]\n return transcripts_df\n\n\ndef convert_coordinates(coordinates_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Convert coordinates dataframe to BED format.\n\n Parameters\n ----------\n coordinates_df : pd.DataFrame\n coordinate format provided by the annotation file.\n ID annotation\n chr1:11874-14409 promoter_of_interest\n\n Returns\n -------\n pd.DataFrame\n Bed format dataframe with columns: chromosome, start,\n end, annotation, gene.\n\n +------------------+----------------------+\n | ID | annotation |\n +------------------+----------------------+\n | chr1:11874-14409 | promoter_of_interest |\n +------------------+----------------------+\n\n |\n |\n V\n\n +------------+-------+-------+----------------------+\n | chromosome | start | end | annotation |\n +------------+-------+-------+----------------------+\n | chr1 | 11874 | 14409 | promoter_of_interest |\n +------------+-------+-------+----------------------+\n \"\"\"\n # If the \"Coordinates\" column is empty, return an empty dataframe:\n if coordinates_df.empty:\n # Define the columns and their corresponding data types\n\n # Create an empty DataFrame with specified columns and data types\n empty_df = pd.DataFrame(\n columns=[\"chromosome\", \"start\", \"end\", \"annotation\", \"gene\"])\n print(\"No Coordinates found in the annotation file.\")\n return empty_df\n\n # create columns\n coordinates_df[[\"chr\", \"start\", \"end\", \"gene\"]] = \"\"\n\n try:\n # Split the \"Coordinates\" column by ':' and '-'\n coordinates_df[[\"chromosome\", \"start\", \"end\"]] = coordinates_df[\n \"Coordinates\"\n ].str.split(\"[:-]\", expand=True)\n coordinates_df[\"chromosome\"] = coordinates_df[\"chromosome\"].str.replace(\n r\"(?i)chr(omosome)?\", \"\", regex=True\n )\n coordinates_df = coordinates_df[\n [\"chromosome\", \"start\", \"end\", \"annotation\", \"gene\"]\n ]\n\n except Exception as err:\n print(f\"Error: {err}\")\n print(\"Please check the format of the coordinates in the annotation file.\")\n empty_df = pd.DataFrame(\n columns=[\"chromosome\", \"start\", \"end\", \"annotation\", \"gene\"])\n return empty_df\n\n try:\n coordinates_df[\"start\"] = coordinates_df[\"start\"].astype('Int64')\n coordinates_df[\"end\"] = coordinates_df[\"end\"].astype('Int64')\n except ValueError as e:\n print(f\"Error: {e}\")\n\n return coordinates_df\n\n\ndef parse_annotation_tsv(path: str,\n gff_transcripts_df: pd.DataFrame) -> tuple[pd.DataFrame,\n pd.DataFrame]:\n \"\"\"\n Parse an annotation TSV file and separate it into dataframes for HGNC IDs,\n Transcript IDs, and Coordinates, then merge them with a GFF dataframe.\n\n Parameters\n ----------\n path : str\n The file path to the TSV annotation file.\n gff_transcripts_df : pd.DataFrame\n A dataframe containing GFF information including transcript IDs.\n\n Returns\n -------\n Tuple[pd.DataFrame, pd.DataFrame]\n A tuple containing two dataframes:\n 1. The merged dataframe for HGNC IDs and transcripts. (hgnc_merged_df)\n 2. 
The coordinated dataframe for coordinates to be appended\n to a BED file later (coordinates_df).\n \"\"\"\n df = pd.read_csv(path, sep=\"\\t\")\n # Create masks for HGNC, Transcript, and Coordinates dataframes\n hgnc_mask = df[\"ID\"].str.startswith(\"HGNC:\") | df[\"ID\"].str.isnumeric()\n # Use regex to match transcript IDs/chromosome coordinates\n pattern_nm = r'^NM'\n transcript_mask = df[\"ID\"].str.contains(pattern_nm, case=False)\n pattern_chr = r'^(chr|chromosome)'\n coordinates_mask = df[\"ID\"].str.contains(pattern_chr, case=False)\n\n # Use masks to filter the original dataframe\n not_separated_rows = df[~(hgnc_mask | transcript_mask | coordinates_mask)]\n\n # for hgcnID and transcriptID don't exist\n if not_separated_rows.empty:\n print(\"All rows were separated successfully\")\n else:\n print(f\"These rows were not separated: \\n {not_separated_rows}\")\n\n # Create dataframes for HGNC IDs, Transcript IDs, and Coordinates\n hgnc_df = df[hgnc_mask]\n transcript_df = df[transcript_mask]\n coordinates_df = df[coordinates_mask]\n print(coordinates_df)\n # set dtype for each column\n dtype_mapping_hgnc = {\n \"ID\": \"Int64\",\n \"annotation\": \"category\",\n }\n\n dtype_mapping_transcript = {\n \"ID\": \"str\",\n \"annotation\": \"category\",\n }\n\n dtype_mapping_gff = {\n \"hgnc_id\": \"Int64\",\n }\n\n hgnc_df = hgnc_df.astype(dtype_mapping_hgnc)\n transcript_df = transcript_df.astype(dtype_mapping_transcript)\n gff_transcripts_df = gff_transcripts_df.astype(dtype_mapping_gff)\n # Rename columns for clarity\n hgnc_df = hgnc_df.rename(columns={\"ID\": \"hgnc_id\"})\n transcript_df = transcript_df.rename(columns={\"ID\": \"transcript_id\"})\n coordinates_df = coordinates_df.rename(columns={\"ID\": \"Coordinates\"})\n\n # Remove everything after '.' in the \"transcript_id\" column\n gff_transcripts_df[\"transcript_id\"] = (\n gff_transcripts_df[\"transcript_id\"].str.split(\".\").str[0]\n )\n transcript_df[\"transcript_id\"] = (\n transcript_df[\"transcript_id\"].str.split(\".\").str[0]\n )\n\n # Merge the HGNC and Transcript dataframes with gff dataframe based on the 'ID' column\n merged_hgnc_df = gff_transcripts_df.merge(\n hgnc_df, on=\"hgnc_id\", how=\"inner\")\n merged_transcript_df = gff_transcripts_df.merge(\n transcript_df, on=\"transcript_id\", how=\"inner\"\n )\n\n # Find the rows dropped during the merge\n dropped_hgnc_rows = hgnc_df[~hgnc_df[\"hgnc_id\"].isin(\n merged_hgnc_df[\"hgnc_id\"])]\n dropped_transcript_rows = transcript_df[\n ~transcript_df[\"transcript_id\"].isin(\n merged_transcript_df[\"transcript_id\"])\n ]\n if not dropped_hgnc_rows.empty:\n print(f\"Summary of dropped HGNC rows: \\n {dropped_hgnc_rows}\")\n else:\n print(\"All HGNC rows were merged successfully\")\n if not dropped_transcript_rows.empty:\n print(\n f\"Summary of dropped Transcript rows: \\n {dropped_transcript_rows}\")\n else:\n print(\"All Transcript rows were merged successfully\")\n # Concatenate the merged dataframes\n hgnc_merged_df = pd.concat([merged_hgnc_df, merged_transcript_df])\n\n # Coordinates dataframe split into columns\n coordinates_df = convert_coordinates(coordinates_df)\n\n return hgnc_merged_df, coordinates_df\n\n\ndef extract_hgnc_id(dbxref_str: str):\n \"\"\"\n Wrapper function to extract HGNC ID from a string of dbxrefs.\n\n Parameters\n ----------\n dbxref_str : str\n various ids separated by commas\n\n Returns\n -------\n int | None\n HGNC ID as an integer i.e. 
427 for HGNC:427.\n Returns None if no HGNC ID found.\n\n Raises\n ------\n ValueError\n If more than one HGNC ID is found in the input string.\n \"\"\"\n if not dbxref_str:\n return None\n parts = dbxref_str.split(\",\")\n hgnc_ids = []\n for part in parts:\n match = re.search(r\"hgnc[:_][0-9]+\", part, re.IGNORECASE)\n if match:\n hgnc_id = int(match.group().replace(\"_\", \":\").split(\":\")[-1])\n hgnc_ids.append(hgnc_id)\n try:\n if len(hgnc_ids) > 1:\n raise ValueError(\"Multiple HGNC IDs found: \" +\n \", \".join(map(str, hgnc_ids)))\n elif hgnc_ids:\n return hgnc_ids[0]\n else:\n return None\n except ValueError as e:\n print(f\"Error: {e}\")\n return hgnc_ids[0]\n\n\ndef read_assembly_mapping(assembly_file: str):\n \"\"\"\n Reads in the associated assembly file and returns a dictionary mapping\n to find chromosome for each refseq accession.\n\n Parameters\n ----------\n assembly_file : str (file path to tsv)\n found at: https://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/001/405/GCF_000001405.25_GRCh37.p13/\n\n Returns\n -------\n dictionary\n mapping of refseq accession to chromosome\n \"\"\"\n accession_to_chromosome = {}\n assembly_df = pd.read_csv(assembly_file, sep=\"\\t\",\n comment=\"#\", header=None)\n assembly_df = assembly_df.dropna() # Drop rows with missing values\n # filter out na from chromosome column and turn accession and chromosome columns to dict\n assembly_df = assembly_df[~assembly_df[2].str.startswith(\"na\")]\n accession_to_chromosome = dict(zip(assembly_df[6], assembly_df[2]))\n\n return accession_to_chromosome\n\n\ndef map_accession_to_chromosome(accession: str, accession_to_chromosome: dict):\n \"\"\"\n Simple mapping function to find chromosome for a given refseq accession.\n Calls the accession_to_chromosome dictionary and extracts the chromosome.\n\n Parameters\n ----------\n accession: str\n str of the refseq accession\n\n accession_to_chromosome: dictionary\n dictionary mapping of refseq accession to chromosome\n\n Returns\n -------\n str value for the corresponding chromosome for the accession key.\n Or if not present in the dictionary, returns \"Unknown - {accession}\"\n \"\"\"\n return accession_to_chromosome.get(accession, f\"Unknown - {accession}\")\n\n\ndef parse_pickle(pickle_file: str):\n \"\"\"\n Parses a pickle file and returns a DataFrame of transcripts.\n\n Parameters\n ----------\n pickle_file : str (path to Pickle file)\n pickle file of a GFF DataFrame once parsed\n with columns from attributes_to_columns\n\n Returns\n -------\n transcripts_df: dataframe\n dataframe of transcripts with columns for attributes.\n Contains only transcripts with NM_ prefix.\n \"\"\"\n gff_df = pd.read_pickle(pickle_file)\n transcripts_df = gff_df[gff_df[\"transcript_id\"].fillna(\n \"\").str.startswith(\"NM_\")]\n return transcripts_df\n\n\ndef merge_overlapping(bed_df: pd.DataFrame):\n \"\"\"\n Function to merge overlapping regions in a bed file by annotation.\n\n Parameters\n ----------\n bed_df : dataframe\n bed file with columns: seq_id, start_flank,\n end_flank, hgnc_id, annotation, gene, chromosome\n\n Returns\n -------\n merged_df: dataframe\n dataframe of merged rows with columns: chromosome, start,\n end, annotation\n \"\"\"\n # Sort by chromosome, start, and end\n # This makes sure that overlapping regions are next to each other.\n\n bed_df = bed_df.sort_values(\n by=[\"annotation\", \"chromosome\", \"start_flank\", \"end_flank\"]\n )\n # Sort by first annotation then chromosome, start, and end.\n merged_rows = []\n\n current_row = bed_df.iloc[0]\n 
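# Single pass over the pre-sorted rows: a row is only folded into current_row\n    # when its annotation and chromosome match and the intervals overlap.\n    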
for _, row in bed_df.iterrows():\n if row[\"annotation\"] != current_row[\"annotation\"]:\n merged_rows.append(current_row) # Append the merged row\n current_row = row # Start a new potential merged row\n # Only rows with same annotation are merged\n if row[\"chromosome\"] != current_row[\"chromosome\"]:\n merged_rows.append(current_row)\n current_row = row\n # Only rows with same chromosome are merged.\n if row[\"start_flank\"] <= current_row[\"end_flank\"]:\n current_row[\"end_flank\"] = max(\n current_row[\"end_flank\"], row[\"end_flank\"])\n # Extend the end if overlapping\n else:\n merged_rows.append(current_row)\n current_row = row\n\n merged_rows.append(current_row) # Append the last merged row\n merged_df = pd.DataFrame(merged_rows)\n return merged_df\n\n\ndef config_igv_report(args: argparse.Namespace):\n \"\"\"\n Function to call igv report script with the correct parameters.\n Generates an IGV html report using generic handling.\n\n Parameters\n ----------\n args : argeparse object\n argeparse object with the following attributes:\n reference_genome, output_file_suffix, gff_file/pickle_file,\n annotation_file/transcript_file, assembly_file, and flanking.\n\n Returns\n -------\n None\n \"\"\"\n # assign vars.\n maf_file = f\"output_{args.reference_genome}_{args.output_file_suffix}.maf\"\n bed_file = f\"output_{args.reference_genome}_{args.output_file_suffix}.bed\"\n genome = args.reference_genome\n fasta_ref = args.reference_file\n info_columns = []\n title = f\"{args.output_file_suffix}_report\"\n output_file = f\"{title}.html\"\n print(\"Creating IGV report...\")\n\n print(\n f\"Bed file: {bed_file}\\nGenome: {genome}\\n\"\n f\"Info columns: {info_columns}\\nTitle: {title}\\nOutput: {output_file}\"\n )\n\n igv.create_igv_report(\n bed_file, maf_file, genome, fasta_ref, info_columns, title, output_file\n )\n\n print(\"IGV report created successfully!\")\n\n\ndef write_bed(annotation_df: pd.DataFrame,\n coordinates_df: pd.DataFrame,\n args: argparse.Namespace) -> None:\n \"\"\"\n Combines dataframes, extracts chromosome for HGNC_ids,\n and writes to MAF & BED file for IGV visualisation and VEP annotation.\n\n Parameters\n ----------\n annotation_df : pd.DataFrame\n A dataframe containing annotation information.\n coordinates_df : pd.DataFrame\n A dataframe containing coordinates information.\n args : Namespace\n A namespace containing command-line arguments and options.\n\n Outputs\n -------\n bed file: (file) bed file containing the relevant transcripts\n for annotation for visualisation in igv.\n \"\"\"\n # Create BED file with flanking regions\n print(\"Creating BED file\")\n print(\"Adding flanking regions\")\n annotation_df[\"start_flank\"] = annotation_df[\"start\"] - args.flanking\n annotation_df[\"end_flank\"] = annotation_df[\"end\"] + args.flanking\n bed_columns = [\n \"seq_id\",\n \"start_flank\",\n \"end_flank\",\n \"hgnc_id\",\n \"annotation\",\n \"gene\",\n ]\n bed_df = annotation_df[bed_columns]\n bed_df = bed_df.reindex()\n print(f\"Summary of BED file df before collapsing \\n {bed_df.head()}\")\n # Extract chromosome from seqid and create the 'chromosome' column\n accession_to_chromosome = read_assembly_mapping(args.assembly_summary)\n # Add a new column 'chromosome' by mapping accession to chromosome identifier\n bed_df.loc[:, \"chromosome\"] = bed_df[\"seq_id\"].apply(\n lambda x: map_accession_to_chromosome(x, accession_to_chromosome)\n )\n print(f\"Summary of BED file df before collapsing \\n {bed_df.head()}\")\n\n # Merge overlapping entries\n collapsed_df = 
merge_overlapping(bed_df).reset_index(drop=True)\n    print(f\"Summary of BED file df after collapsing \\n {collapsed_df.head()}\")\n    # Reorder the columns to match the BED format\n    cols = [\"chromosome\", \"start_flank\", \"end_flank\", \"annotation\", \"gene\"]\n    collapsed_df = collapsed_df[cols]\n    # Rename columns\n    new_column_names = {\"start_flank\": \"start\", \"end_flank\": \"end\"}\n    collapsed_df.rename(columns=new_column_names, inplace=True)\n    print(coordinates_df.head())\n    collapsed_df = pd.concat(\n        [collapsed_df, coordinates_df], axis=0, ignore_index=True)\n    print(collapsed_df.head(10))\n    # Write the collapsed data to an output file\n    output_file_name_maf = (\n        f\"output_{args.reference_genome}_{args.output_file_suffix}.maf\"\n    )\n    output_file_name_bed = (\n        f\"output_{args.reference_genome}_{args.output_file_suffix}.bed\"\n    )\n    collapsed_df.to_csv(output_file_name_maf, sep=\"\\t\",\n                        header=True, index=False)\n    collapsed_df.to_csv(output_file_name_bed, sep=\"\\t\",\n                        header=False, index=False)\n\n\ndef main():\n    \"\"\"\n    Main logic for script\n    Collects arguments.\n    Based on this imports the correct inputs and parses them.\n    Creates a BED file for annotation of relevant transcripts.\n    Creates an IGV report.\n    \"\"\"\n    args = parse_args()\n\n    # read in pickle file if provided\n    if args.pickle:\n        transcripts_df = parse_pickle(args.pickle)\n        print(\"Parsed pickle file\")\n    else:\n        # Parse gff file\n        transcripts_df = parse_gff(args.gff_file)\n\n    # Read the annotation file into a pandas DataFrame\n    if args.annotation_file:\n        annotation_df, coordinates_df = parse_annotation_tsv(\n            args.annotation_file, transcripts_df\n        )\n    # Read the transcript annotation file\n    elif args.transcript_file:\n        annotation_df, coordinates_df = parse_annotation_tsv(\n            args.transcript_file, transcripts_df\n        )\n\n    # Merge NM entries with matching HGNC IDs\n    print(\"Merging annotation and gff dataframes\")\n    write_bed(annotation_df, coordinates_df, args)\n\n    # Create an IGV report\n    config_igv_report(args)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"eastgenomics/gene_annotation2bed","sub_path":"gene_annotation2bed.py","file_name":"gene_annotation2bed.py","file_ext":"py","file_size_in_byte":25365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28917222508","text":"def bracketPoint(x, X):\n    '''\n    Inputs:\n    X: a vector of numbers ordered from smallest to largest\n    x: a real valued number\n    \n    Output:\n    returns the index i such that X[i]<x<X[i+1]\n    '''\n    if x<X[0] or x>X[-1]:\n        raise ValueError('The point x is outside the suitable range of values')\n    \n    i = 0\n    while x>X[i+1]:\n        i+=1\n    \n    return i","repo_name":"DendrouLab/Photizo","sub_path":"preprocessing/.ipynb_checkpoints/bracketPoint-checkpoint.py","file_name":"bracketPoint-checkpoint.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17993084087","text":"'''Information about the module'''\n\nprint('Module added')\n\nnumber = 10\nnumber = [1,2,3]\n\nperson ={\n    \"name\" : \"Ali\",\n    \"age\" : \"25\",\n    \"city\" : \"istanbul\"\n}\n\ndef func(x):\n    '''notes about the function'''\n    print(f'x : {x}')\n\n\nclass Person:\n    def speak(self):\n        print('AYEM SPEAK')","repo_name":"AlpVrn/Python","sub_path":"Modules/mod.py","file_name":"mod.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40134796715","text":"# Created 
by Egor Kostan.\n# GitHub: https://github.com/ikostan\n# LinkedIn: https://www.linkedin.com/in/egor-kostan/\n\nimport allure\n\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom tests.config import Config\nfrom utils.step_definition import step_definition\nfrom utils.screenshot import screenshot_on_fail\nfrom utils.clean_database import clean_database\nfrom utils.open_web_browser import open_web_browser\n\nfrom expected_results.users.base_user import BaseUser\nfrom expected_results.page_content.home_page_content import HomePageContent\nfrom expected_results.users.valid_users_templates.jane_doe import JaneDoe\n\nfrom page_locators.register_page_locator import RegisterPageLocator\nfrom page_object_models.register_page_model import RegisterPageModel\nfrom tests.e2e_tests.base_case.user_registration_case import UserRegistrationCase\nfrom expected_results.page_content.register_page_content import RegisterPageContent\n\n\n@allure.epic('Page Functionality')\n@allure.parent_suite('End To End')\n@allure.suite(\"User Registration\")\n@allure.sub_suite('Positive Tests')\n@allure.feature(\"Register Page\")\n@allure.story('Register Functionality')\n@screenshot_on_fail()\nclass TestUserRegistration(UserRegistrationCase):\n\n\t@classmethod\n\tdef setUpClass(cls):\n\n\t\tcls.config = Config()\n\t\tcls.client = BaseUser(JaneDoe)\n\t\tcls.page = None\n\n\t\twith allure.step(\"Initial data setup > clean DB\"):\n\t\t\tclean_database(cls.config)\n\n\t@classmethod\n\tdef tearDownClass(cls):\n\t\twith allure.step(\"Close Web Browser\"):\n\t\t\tif cls.page:\n\t\t\t\tcls.page.quit()\n\t\t\t\tcls.page = None\n\n\tdef setUp(self):\n\t\twith allure.step(\"Initial data setup: {}\".format(RegisterPageContent.URL)):\n\t\t\tself.page_model = RegisterPageModel\n\t\t\tself.page_content = RegisterPageContent\n\t\t\twith allure.step(\"Open web browser\"):\n\t\t\t\tself.page = open_web_browser(config=self.config,\n\t\t\t\t                             page_model=self.page_model,\n\t\t\t\t                             page_content=self.page_content)\n\n\tdef tearDown(self):\n\t\twith allure.step(\"Close current tab\"):\n\t\t\tif self.page:\n\t\t\t\tself.page.quit()\n\t\t\t\tself.page = None\n\n\tdef test_user_registration(self):\n\t\tallure.dynamic.description(\"\"\"\n        User registration test case:\n        1. Open 'Register' web page\n        2. Fill out user personal data\n        4. Verify that each data item appears in relevant field\n        5. Hit 'Register' button\n        6. Verify 'Welcome' message\n        7. Verify that \"Account Services\" menu is present\n        8. Log Out\n        9. Verify that \"Account Services\" menu is not present\n        10. 
Close web browser\n \"\"\")\n\t\tallure.dynamic.title(\"User registration > Positive test\")\n\t\tallure.dynamic.severity(allure.severity_level.CRITICAL)\n\n\t\t# Register a new user:\n\t\tself.fill_out_user_data()\n\n\t\twith allure.step('Hit \"Register\" button'):\n\t\t\tprint('Hit \"Register\" button')\n\t\t\tself.page = self.page.hit_register_btn()\n\n\t\twith allure.step('Wait for \"Welcome\" message'):\n\t\t\tWebDriverWait(self.page.driver,\n\t\t\t self.page.explicit_wait_time).until(\n\t\t\t\tEC.presence_of_element_located(RegisterPageLocator.HEADER))\n\n\t\tstep_definition(self,\n\t\t expected=RegisterPageContent.WELCOME_MESSAGE['header'] + self.client.username,\n\t\t actual=self.page.welcome_header,\n\t\t act=None,\n\t\t step_description='Verify \"Welcome\" header',\n\t\t click=False)\n\n\t\tstep_definition(self,\n\t\t expected=RegisterPageContent.WELCOME_MESSAGE['message'],\n\t\t actual=self.page.welcome_message,\n\t\t act=None,\n\t\t step_description='Verify \"Welcome\" message',\n\t\t click=False)\n\n\t\tstep_definition(self,\n\t\t expected=True,\n\t\t actual=self.page.account_services_menu_is_visible,\n\t\t act=None,\n\t\t step_description='Verify that \"Account Services\" menu is present',\n\t\t click=False)\n\n\t\t# Logout\n\t\twith allure.step('Hit \"Log Out\" link'):\n\t\t\tself.page = self.page.hit_log_out_button()\n\n\t\t# Post Logout validation\n\t\tstep_definition(self,\n\t\t expected=self.config.base_url + HomePageContent.URL,\n\t\t actual=self.page.url,\n\t\t act=None,\n\t\t step_description='Verify URL',\n\t\t click=False)\n\n\t\tstep_definition(self,\n\t\t expected=False,\n\t\t actual=self.page.account_services_menu_is_visible,\n\t\t act=None,\n\t\t step_description='Verify that \"Account Services\" menu is not present',\n\t\t click=False)\n","repo_name":"ikostan/ParaBankSeleniumAutomation","sub_path":"tests/e2e_tests/user_registration/positive/user_registration_test.py","file_name":"user_registration_test.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"23238653939","text":"import unittest\nclass Solution(object):\n def singleNumber(self, nums):\n result = 0\n for num in nums:\n result = num ^ result\n return result\n\nclass Test(unittest.TestCase):\n def test(self):\n self._test([1,2,1], 2)\n\n def _test(self, Li, expected):\n actual = Solution().singleNumber(Li)\n self.assertEqual(actual, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"WaltXin/py_leet","sub_path":"p136.py","file_name":"p136.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26349407684","text":"import argparse\nimport os\nimport sys\n\nfrom tsp_plotter.solution import solution_plotter\nfrom tsp_plotter.problem import problem_generator\nfrom tsp_plotter.output_files import OutputFiles\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n prog=\"salesman\",\n description=\"Travelling salesman - problem generator / solution plotter\",\n )\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n type=str,\n metavar=\"DIR\",\n default=\"output\",\n dest=\"output_dir\",\n help=\"output directory [default: output]\",\n )\n subparsers = parser.add_subparsers(\n help=\"Available commands\",\n description=\"Command help\",\n dest=\"subcommand\",\n required=True,\n )\n\n # Create a problem\n\n create_parser = subparsers.add_parser(\n \"create\",\n 
help=\"Generate a problem\",\n )\n create_parser.add_argument(\n \"num_points\",\n type=int,\n help=\"Number of points in the problem\",\n )\n create_parser.add_argument(\n \"-a\",\n \"--arc\",\n help=\"Points form an arc\",\n action=\"store_true\",\n )\n\n # Plot a solution\n\n plot_parser = subparsers.add_parser(\n \"plot\",\n help=\"Plot a solution\",\n )\n plot_parser.add_argument(\n \"solution\",\n help=\"Input file containing the solution path\",\n )\n plot_parser.add_argument(\n \"problem\",\n help=\"Input file containing the points defining the problem\",\n )\n\n args = parser.parse_args()\n\n if len(args.output_dir) > 0:\n path = args.output_dir\n if not os.path.exists(path):\n os.makedirs(path)\n\n output_files = OutputFiles(args.output_dir)\n\n if args.subcommand == \"create\":\n problem_generator.generate_problem(\n args.num_points,\n args.arc,\n output_files.problem,\n output_files.problem_image,\n output_files.graph,\n )\n\n elif args.subcommand == \"plot\":\n solution_plotter.plot_solution(\n args.solution, args.problem, output_files.solution_image\n )\n else:\n print(f\"unknown command: {args.subcommand}\", file=sys.stderr)\n","repo_name":"damonf/travelling-salesman-plotter","sub_path":"src/tsp_plotter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18305772375","text":"import logging\n\nimport numpy as np\nimport pandas as pd\nimport yfinance as yf\nfrom tqdm import tqdm\n\nfrom back_testing import BackTester\nfrom technical_strategies import (\n MACDStrategy,\n BuyHoldStrategy,\n DonchainStrategy\n)\nfrom utils import (\n annualized_return,\n annualized_volatility,\n sharpe_ratio,\n max_drawdown,\n z_score,\n p_value\n)\n\n\ndef main(investment=20000, symbol=\"TSLA\", log_path=None, strategy=None):\n \"\"\"\n back testing\n :param investment: int; initial investment\n :param log_path: str, default=None; path of logging file\n :param symbol: str; ticker name\n :param strategy: abc.ABCMeta; trading strategy\n :return:\n \"\"\"\n try:\n if log_path:\n std_filename = log_path\n else:\n std_filename = \".\\\\order_trace.log\"\n logging.basicConfig(filename=std_filename, filemode='a',\n format='%(asctime)s - %(message)s', level=logging.DEBUG)\n logging.info('')\n logging.info(f'Entering strategy')\n\n # select price volume data of facebook from yahoo finance\n ticker = yf.Ticker(symbol)\n\n # initialize single input data (of 1 min frequency)\n df_ticker = ticker.history(\n period='max',\n interval='1d',\n start=None,\n end=\"2022-09-20\",\n actions=True,\n auto_adjust=True, # Adj Close\n back_adjust=False\n ).drop([\"Dividends\", \"Stock Splits\"], axis=1)\n\n # initialize evaluation matrix for strategy performance\n matrix = pd.DataFrame(\n columns=[\"signal\", \"position\", \"close_price\", \"cash\", \"holdings\", \"total\", \"pnl\", \"cum_return\"]\n )\n\n # initialize dataframes for MACD strategy\n df_ticker[\"ema_12\"] = df_ticker[\"Close\"].ewm(span=12, adjust=True).mean()\n df_ticker[\"ema_26\"] = df_ticker[\"Close\"].ewm(span=26, adjust=True).mean()\n df_ticker[\"macd_line\"] = df_ticker[\"ema_12\"] - df_ticker[\"ema_26\"]\n df_ticker[\"signal_line\"] = df_ticker[\"macd_line\"].ewm(span=9, adjust=True).mean()\n\n # initialize dataframes for Donchain Channel strategy\n df_ticker[\"high_line\"] = df_ticker[\"High\"].rolling(20).max()\n df_ticker[\"low_line\"] = df_ticker[\"Low\"].rolling(20).min()\n\n df_ticker = 
df_ticker.dropna().reset_index()\n\n # initialize\n back_tester = BackTester(strategy=strategy(len(df_ticker)), cash=investment)\n\n # initialize single input data (of daily frequency)\n for date in tqdm(df_ticker.Date, postfix=\"Strategy Operating\"):\n price_data = dict(df_ticker[df_ticker.Date == date].iloc[0])\n\n # set buy or sell signal and run the strategy\n action = back_tester.order_instruction(price_data)\n back_tester.order_execution(price_data, action)\n\n # fill the data\n matrix.loc[date] = [\n action,\n back_tester.list_position[-1],\n price_data[\"Close\"],\n back_tester.list_cash[-1],\n back_tester.list_holdings[-1],\n back_tester.list_total[-1],\n back_tester.list_total[-1] - investment,\n (back_tester.list_total[-1] - investment) / investment\n ]\n\n # indicators for the strategy\n ret = matrix[\"pnl\"].diff() / investment\n signal_size = np.minimum(\n len(matrix[matrix.signal == \"sell\"]),\n len(matrix[matrix.signal == \"buy\"])\n )\n indicators = pd.DataFrame({\n \"annualized_return\": [annualized_return(ret)],\n \"annualized_volatility\": [annualized_volatility(ret)],\n \"total_return\": [matrix[\"cum_return\"].iloc[-1]],\n \"annual_sharpe_ratio\": [sharpe_ratio(ret)],\n \"maximum_drawdown\": [max_drawdown(ret)],\n \"number_round_trip\": [signal_size],\n \"z_score\": [z_score(ret, signal_size * 2)],\n \"p_value\": [p_value(ret, signal_size * 2)]\n })\n\n logging.info(f'Leaving strategy')\n return df_ticker, matrix, indicators # , sharpe_ratio\n\n except FileNotFoundError as nf_error:\n logging.error(f'Leaving strategy incomplete with errors')\n return f'ERROR: {str(nf_error)}'\n except KeyError as key_error:\n logging.error(f'Leaving strategy incomplete with errors')\n return f'ERROR: {key_error.args[0]}'\n except Exception as gen_exc:\n logging.error(f'Leaving strategy incomplete with errors')\n raise gen_exc\n\n\nif __name__ == \"__main__\":\n df_macd, matrix_macd, idx_macd = main(\n investment=20000,\n symbol=\"TSLA\",\n log_path=None,\n strategy=MACDStrategy\n ) # , sr_macd\n\n idx_macd.to_csv(\"indicators.csv\")\n matrix_macd.to_csv(\"matrix.csv\")\n\n df_bh, matrix_bh, idx_bh = main(\n investment=20000,\n symbol=\"TSLA\",\n log_path=None,\n strategy=BuyHoldStrategy\n )\n\n df_dc, matrix_dc, idx_dc = main(\n investment=20000,\n symbol=\"TSLA\",\n log_path=None,\n strategy=DonchainStrategy\n )\n","repo_name":"ChopinYan/NYU-MFE-Projects","sub_path":"FRE_GY_7251/Technical_Trading_Strategy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31447285775","text":"import tkinter as tk\nfrom tkinter import ttk\n\nfrom encrypter.encrypter import encrypt_file, decrypt_file\n\n\ndef main():\n # UI\n root = tk.Tk()\n root.title(\"Cifrador / Descifrador de archivos de texto\")\n root.geometry(\"420x300\")\n\n style = ttk.Style()\n style.configure(\"TButton\", font=('Helvetica', 14), padding=(20, 10))\n encrypt_button = ttk.Button(root, text=\"Carga Archivo para cifrado\", style=\"TButton\", command=lambda: encrypt_file(root))\n encrypt_button.pack(expand=True,padx=10, pady=10)\n\n decrypt_button = ttk.Button(root, text=\"Carga Archivo a descrifrar.\", style=\"TButton\", command=lambda: decrypt_file(root))\n decrypt_button.pack(expand=True, padx=10, pady=10)\n\n root.mainloop()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"contracamilo/python_txt_file_cypher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33416142420","text":"import pygame, sys\nfrom pygame.locals import *\n\nclass Termometro():\n \n def __init__(self):\n self.costume = pygame.image.load('imagenes/termo1.png')\n \n def convertir(self, grados, toUnidad):\n resultado = 0\n if toUnidad == 'F':\n resultado = grados * 9/5 + 32\n elif toUnidad == 'C':\n resultado = (grados - 32) * 5/9\n else:\n resultado = grados\n \n return \"{:10.2f}\".format(resultado) \n\nclass Selector():\n __tipoUnidad = None\n \n def __init__(self, unidad='C'):\n self.__costumes = []\n self.__costumes.append(pygame.image.load('imagenes/posiF.png'))\n self.__costumes.append(pygame.image.load('imagenes/posiC.png'))\n \n self.__tipoUnidad = unidad\n \n def costume(self):\n if self.__tipoUnidad == 'F':\n return self.__costumes[0]\n else:\n return self.__costumes[1]\n # como hemos creado __tipoUnidad como atributo privado, para acceder a él\n # necesitamos un getter\n def unidad(self):\n return self.__tipoUnidad\n \n # para cambiar barra de F a C o C a F\n def change(self):\n if self.__tipoUnidad == 'F':\n self.__tipoUnidad = 'C'\n else:\n self.__tipoUnidad = 'F'\n \n \n\nclass NumberInput():\n __value = 0\n __strValue = ''\n __position = [0, 0]\n __size = [0, 0]\n __pointsCount = 0 \n \n def __init__(self, value =0):\n self.__font = pygame.font.SysFont('Arial', 24)\n # gestionar error\n self.value(value)\n \n '''\n No queremos repetirnos/este código lo tenemos en value:\n try:\n self.__value = int(value)\n self.__strValue = str(value)\n except:\n pass\n '''\n def on_event(self, event):\n if event.type == KEYDOWN:\n if event.unicode.isdigit() and len(self.__strValue) < 10 or (event.unicode == '.' and self.__pointsCount == 0): # para poder introducir decimales\n self.__strValue += event.unicode# para poder añadir nums al rect.\n self.value(self.__strValue)# actualiza el valor de value\n if event.unicode == '.':\n self.__pointsCount += 1\n elif event.key == K_BACKSPACE:\n if self.__strValue[-1] == '.':\n self.__pointsCount -= 1 # si el pointsCount es != 0 entonces viene aquí y se resta uno. Así que no se pueden meter más de 1 decimal\n self.__strValue = self.__strValue[:-1]# para poder borrar con la tecla borrado\n self.value(self.__strValue)\n \n \n def render(self):\n # renderizar el bloque de texto/rectángulo transparente/como cuando creamo screen y luego hacemos blit\n textBlock = self.__font.render(self.__strValue, True, (74, 74, 74))\n # queremos que el bloque sea un rectángulo\n # renderizamos el rectángulo\n rect = textBlock.get_rect()\n rect.left = self.__position[0]\n rect.top = self.__position[1]\n rect.size = self.__size\n \n '''\n return {\n 'fondo': rect,\n 'texto': textBlock\n }\n '''\n \n return(rect, textBlock)\n \n # validaciones/setter para cambiar los valores de los atributos:\n def value(self, val=None):\n if val == None:\n return self.__value\n else:\n val = str(val)\n try:\n self.__value = float(val)\n self.__strValue = val\n if '.' 
in self.__strValue:\n self.__pointsCount = 1\n else:\n self.__pointsCount = 0\n \n except:\n pass\n \n def width(self, val=None):\n if val == None:\n return self.__size[0]\n else:\n try:\n self.__size[0] = int(val)\n except:\n pass\n \n def height(self, val=None):\n if val == None:\n return self.__size[1]\n else:\n try:\n self.__size[1] = int(val)\n except:\n pass\n \n \n def size(self, val=None):\n if val == None:\n return self.__size\n else:\n try:\n self.__size = [int(val[0]), int(val[1])]\n except:\n pass\n \n def posX(self, val=None):\n if val == None:\n return self.__position[0]\n else:\n try:\n self.__position[0] = int(val)\n except:\n pass\n \n def posY(self, val=None):\n if val == None:\n return self.__position[1]\n else:\n try:\n self.__position[1] = int(val)\n except:\n pass\n \n \n def pos(self, val=None):\n if val == None:\n return self.__position\n else:\n try:\n self.__position = [int(val[0]), int(val[1])]\n except:\n pass\n \nclass mainApp():\n termómetro = None\n entrada = None\n selector = None\n \n def __init__(self):\n # Aquí tengo que llamar a todas las clases que he definido anteriormente como atributos\n self.__screen = pygame.display.set_mode((290, 415))\n pygame.display.set_caption('Termómetro')\n \n self.termometro = Termometro()\n self.entrada = NumberInput('0')\n self.entrada.pos((106, 58))\n self.entrada.size((133, 28))\n \n self.selector = Selector()\n \n def __on_close(self):\n pygame.quit()\n sys.exit()\n \n def start(self):\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n self.__on_close()\n \n self.entrada.on_event(event) # me pasas el evento a on_event entrada \n \n # cambia de unidades y cambia el valor del rectángulo\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.selector.change()\n grados = self.entrada.value()\n nuevaUnidad = self.selector.unidad()\n temperatura = self.termometro.convertir(grados, nuevaUnidad)\n print(temperatura)\n self.entrada.value(temperatura)\n \n #pintamos el fondo de pantalla:\n self.__screen.fill((244, 236, 203))\n \n # pintamos el termómetro en su posición \n self.__screen.blit(self.termometro.costume, (50, 34))\n \n # pintamos el cuadro de texto\n text = self.entrada.render() # obtenemos rectángulo blanco y foto de texto y lo asignamos a text\n pygame.draw.rect(self.__screen, (255, 255, 255), text[0]) # creamos el rectángulo blanco con sus datos (posición y tamaño) text[0]\n self.__screen.blit(text[1], self.entrada.pos())# pintamos la foto del texto (text[1])\n \n # pintamos el selector\n self.__screen.blit(self.selector.costume(), (112,153))\n \n pygame.display.flip()\n \nif __name__ == '__main__':\n pygame.font.init()\n app = mainApp()\n app.start()\n","repo_name":"yalaska04/Termometro","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7391,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1134046997","text":"# establishes the whats to be rendered on the pages when called by the hows accessible through the templates\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.shortcuts import render\nfrom .models import *\nfrom .forms import EditorForm\n\n# Create your views here.\ndef landingpage(request):\n artifacts = Artifact.objects.all().order_by('artifact_id')\n return render(request=request, template_name='landingpage.html', context={'artifacts': artifacts})\n\ndef ourstory(request):\n return render(request,'ourstory.html')\n\ndef gallery(request):\n return 
 render(request,'gallery.html')\n\ndef addartifact(request):\n    if request.method == 'GET':\n        form = EditorForm()\n        return render(request=request,template_name='addartifact.html', context={'form': form})\n\n    if request.method == 'POST':\n        form = EditorForm(request.POST)\n        if form.is_valid():\n            title = form.cleaned_data['title']\n            description = form.cleaned_data['description']\n            img_link = form.cleaned_data['img_link']\n\n            artifacts = Artifact.objects.create(title=title, description=description, img_link=img_link)\n            return HttpResponseRedirect(reverse('landingpage'))\n\ndef editartifact(request, artifact_id):\n    if request.method == 'GET':\n        artifact = Artifact.objects.get(pk=artifact_id)\n        form = EditorForm(initial={'title': artifact.title, 'description': artifact.description, 'img_link': artifact.img_link })\n        return render(request=request, template_name='editartifact.html', context={'form': form, 'artifact_id': artifact_id})\n\n    if request.method == 'POST':\n        form = EditorForm(request.POST)\n        if form.is_valid():\n            if 'save' in request.POST:\n                title = form.cleaned_data['title']\n                description = form.cleaned_data['description']\n                img_link = form.cleaned_data['img_link']\n                artifacts = Artifact.objects.filter(pk=artifact_id)\n                artifacts.update(title=title, description=description, img_link=img_link)\n            elif 'deleteartifact' in request.POST:\n                Artifact.objects.filter(pk=artifact_id).delete()\n            return HttpResponseRedirect(reverse('landingpage'))\n\n\n\n\n","repo_name":"lavoing5762/Inheritance","sub_path":"inheritance_project/inheritance/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"2759296497","text":"from contextlib import contextmanager\n\nimport psycopg2\nfrom elasticsearch import Elasticsearch\nfrom psycopg2.extras import RealDictCursor\n\n\n@contextmanager\ndef elastic_search_connection(dsn: str):\n    es_connection = Elasticsearch(dsn)\n    try:\n        yield es_connection\n    finally:\n        es_connection.close()\n\n\n@contextmanager\ndef postgres_connection(dsn: dict):\n    connection = psycopg2.connect(**dsn, cursor_factory=RealDictCursor)\n    connection.set_session(autocommit=True)\n    try:\n        yield connection\n    finally:\n        connection.close()\n","repo_name":"AlexanderPRM/Cinema","sub_path":"films_api/etl/etl_persons/utils/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"27135328758","text":"# Task 5 VERY HARD SORT (optional)\n# Define a two-dimensional array of integers. The number of rows and columns is\n# entered from the keyboard. Sort the elements in ascending order left to right and top to bottom.\n\n# For example, given the array:\n# 1 4 7 2\n# 5 9 10 3\n# After sorting\n# 1 2 3 4\n# 5 7 9 10\n\ndef create_array():\n    \"\"\"\n    Creates an array with the given number of rows,\n    reading each row's elements from the keyboard.\n    \"\"\"\n    size = int(input('Enter the array size: '))\n    array_input = []\n    for x in range(size):\n        array_input.append([int(y) for y in input('Enter the array elements separated by spaces: ').split()])\n    print(f'Original array: {array_input}')\n    return array_input\n\ndef sorted_2D_array():\n    \"\"\"\n    Sorts a 2D array in ascending order, keeping the original row lengths.\n    \"\"\"\n    array = create_array()\n    array_sorted = sorted(sum(array, []))\n    output_rows = []\n    for row in array:\n        output_rows.append(array_sorted[:len(row)])\n        array_sorted = array_sorted[len(row):]\n    output_array = tuple(output_rows)\n    print(f'Sorted array: {output_array}\\n')\n    return output_array\n\nsorted_2D_array()","repo_name":"TrofimovVladislav/Python_GB","sub_path":"PythonSeminars/HW/HW_Seminar_1/hw_sem_1_task_5.py","file_name":"hw_sem_1_task_5.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"9287773339","text":"import requests\n\nfrom config import settings\nfrom db_management.db_commands import insert_streams_into_db\nfrom models.stream import Stream\n\nheaders_token = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\ndata = {\n    \"client_id\": settings.CLIENT_ID,\n    \"client_secret\": settings.CLIENT_SECRET,\n    \"grant_type\": settings.GRANT_TYPE,\n}\nbase_url = \"https://api.twitch.tv/helix/streams?first=100\"\n\n\nasync def parse_twitch():\n    response = requests.post(\n        \"https://id.twitch.tv/oauth2/token\", headers=headers_token, data=data\n    ).json()\n    access_token = \"Bearer \" + response.get(\"access_token\")\n    headers = {\n        \"Authorization\": access_token,\n        \"Client-Id\": \"j0yobo10cfe37ajw4eipnu2dhjoxhy\",\n    }\n    response2 = requests.get(\n        base_url,\n        headers=headers,\n    ).json()\n    cursor = response2.get(\"pagination\").get(\"cursor\")\n    test_num = 3 # for testing\n    for item in response2.get(\"data\"):\n        validated_object = dict(Stream.parse_obj(item))\n        await insert_streams_into_db(validated_object)\n    while test_num > 0: # use here while cursor is not None\n        response = requests.get(\n            f\"{base_url}&after={cursor}\",\n            headers=headers,\n        ).json()\n        for item in response.get(\"data\"):\n            validated_object = dict(Stream.parse_obj(item))\n            await insert_streams_into_db(validated_object)\n        cursor = response.get(\"pagination\").get(\"cursor\")\n        test_num -= 1\n","repo_name":"Povladislav/parser","sub_path":"parsing_twitch.py","file_name":"parsing_twitch.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"10578303357","text":"import numpy as np\n\n#\n# FUNCTIONS\n#\n\n# Update the interval dict for row r after visiting column c\ndef update_row(r, c):\n    len1_l = I_R[r].get(c-1, 0)\n    len2_r = I_R[r].get(c+1, 0)\n    I_R[r][c - len1_l] = len1_l + len2_r + 1\n    I_R[r][c + len2_r] = len1_l + len2_r + 1\n\n# Update the interval dict for column c after visiting row r\ndef update_col(r, c):\n    len1_l = I_C[c].get(r-1, 0)\n    len2_r = I_C[c].get(r+1, 0)\n    I_C[c][r - len1_l] = len1_l + len2_r + 1\n    I_C[c][r + len2_r] = len1_l + len2_r + 1\n\n# Perform a step in move direction and update the intervals\ndef step(r, c, move):\n    if move == 'N':\n        step = 1 + I_C[c].get(r-1, 0)\n        nr, nc = r - step, c\n    if move == 'E':\n
        step = 1 + I_R[r].get(c+1, 0)\n        nr, nc = r, c + step\n    if move == 'S':\n        step = 1 + I_C[c].get(r+1, 0)\n        nr, nc = r + step, c\n    if move == 'W':\n        step = 1 + I_R[r].get(c-1, 0)\n        nr, nc = r, c - step\n    update_col(r, c)\n    update_row(r, c)\n    return nr, nc\n\n#\n# INPUT\n#\n\n# Read number of test cases\nT = int(input())\n\nfor t in range(1, T + 1):\n    # Read variables\n    ################\n    N, R, C, SR, SC = map(int, input().split())\n    moves = input()\n\n    # Use 0,...,N-1 indices\n    er = SR - 1\n    ec = SC - 1\n\n    # Save blocked intervals for each row and column\n    # For each interval save start and end index together with interval length\n    # As dict entries {start: length, end: length}\n    I_R = [{} for _ in range(R)]\n    I_C = [{} for _ in range(C)]\n\n    # Calc output\n    #############\n    for m in moves:\n        er, ec = step(er, ec, m)\n\n    # Print result\n    ##############\n    print(\"Case #{}: {} {}\".format(t, er+1, ec+1))\n","repo_name":"alex-obi/kick-start","sub_path":"2019/C/walk2.py","file_name":"walk2.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"23111710068","text":"\n# function for ANOVA; this function passes parameters to the main anova function in eelbrain\n\n\nimport os\nfrom eelbrain import *\nimport scipy.io\nimport string\n#from TestTabel import *\nimport itertools\nimport eelbrain as e\nfrom PyQt5.QtWidgets import *\n\n\ndef ANOVA22BESASpatial(root, condition1Name, condition2Name, startTime, endTime, presTim, PThresh,\n                       minTemporalCluster, numPerm, anlsysTail, ROI):\n\n\n    dat_files = []\n    conditions = []\n    subjects = []\n    Allconditions = []\n    completed = 0\n    ROI = 'leftba21+leftba20'\n    root = '/Users/esmamansouri/Documents/Data/Adina_AG'\n    for condition in ['sat_ev_lorel', 'sat_ev_hirel','sat_no_ev_lorel', 'sat_no_ev_hirel']:\n\n        for name in os.listdir(root + '/' + condition):\n            if 'dat' in name:\n                dat = load.besa.dat_file(root + '/' + condition + '/' + name)\n                dat_files.append(dat)\n                conditions.append(condition)\n                subjects.append(name[:5])\n                Allconditions.append(condition)\n\n\n    Allconditions1 = Factor(Allconditions, tile=len(subjects)/len(Allconditions))\n\n    subjects.sort()\n    #conditions.sort()\n\n    ds = Dataset()\n    #ds = Dataset((('subject', Factor(subjects)), ('conditions', Factor(conditions)),('dat',combine(dat_files[5:]))))\n    ds['subject'] = Factor(subjects)\n    ds['conditions'] = Factor(Allconditions1)\n    #test = Factor(subjects)\n    #ds['subject'] = Factor(subjects)\n    ROIValues = ROI\n\n    # open mat file to search for sources\n\n    RegionNamesOldBESA = scipy.io.loadmat('regionnamesOldBESA.mat')\n    RegionNames = ['leftba1-3','leftba4','leftba5','leftba6','leftba7','leftba8','leftba9','leftba10','leftba11','leftba17','leftba18','leftba19','leftba20'\n                   ,'leftba21','leftba22','leftba28','leftba36','leftba37','leftba38','leftba39','leftba40','leftba42','leftba44-45','leftba46',\n                   'leftba47','leftcerebellartonsil','rightba1-3','rightba4','rightba5','rightba6','rightba7','rightba8','rightba9','rightba10','rightba11'\n                   ,'rightba17','rightba18','rightba19','rightba20','rightba21','rightba22','rightba28','rightba36','rightba37','rightba38','rightba39',\n                   'rightba40','rightba42','rightba44-45','rightba46','rightba47','rightcerebellartonsil']\n\n    SourceObjectBESA = scipy.io.loadmat('sourcesOldBESA.mat')\n    RegionSourcesListAll = []\n    rois = []\n    # split regions to get sources\n    if ROIValues.find('/') != -1:\n        with open(ROIValues) as f:\n            rois = f.readlines()\n        ROIValues = rois[0]\n\n\n\n    rois = 
ROIValues.split('+')\n\n for roi in rois:\n\n\n\n ROIIndex = RegionNames.index(roi)\n\n RegionSources = SourceObjectBESA.get('test2')[ROIIndex]\n RegionSources = RegionSources[0]\n\n #a = SourceObjectBesa[0:1]['analysisRegion']\n\n\n\n RegionSourcesList = RegionSources.tolist()\n\n RegionSourcesListAll.append( RegionSourcesList[0])\n\n\n\n ROISources = list(itertools.chain(*RegionSourcesListAll))\n ds['dat'] = combine(dat_files) #[5:])\n\n #print ( ds[:5])\n #ds['dat']\n #dat_files[0]\n #test = ds['dat'].get('source')\n\n\n #get sources from roi file\n tails = 0\n #FOR TEMPRAL ONLY\n #y = ds['dat'].sub(source=ROISources).mean('source')\n #print( table.frequencies('conditions', 'subject', ds=ds))\n y = ds['dat']\n\n\n\n t = table.frequencies('conditions', 'subject', ds=ds)\n anlsysTail = 'both'\n if anlsysTail == 'both':\n tails = 0\n elif anlsysTail == 'right':\n tails = 1\n\n elif anlsysTail == 'left':\n tails = -1\n\n\n #QMessageBox.about(None, 'Data extraction complete, now running the statistical test','Analysis Complete Results are displayed in the main window')\n '''completed = 0\n Results = \"\"\n while completed< 100:\n\n completed += 0.0001\n progress.setValue(completed)\n\n progress.value = completed\n progress.isTextVisible = \"True\"\n '''\n # Results = testnd.ttest_rel(y, 'conditions', match='subject', tail=0,ds=ds, tstart=(int(startTime)+int(presTim))/1000, tstop=(int(endTime)+int(presTim))/1000, samples=int(numPerm), pmin=float(PThresh), mintime = int(minTemporalCluster)/1000)\n\n #A = Factor(range(2), repeat=42, name='A')\n #B = Factor(range(2), repeat=42, name='B')\n\n '''Enter the data for the first level of f1, and the first level of f2.\n\n Enter the data for the first level of f1, and the second level of f2.\n\n Enter the data for the second level of f1, and the first level of f2.\n\n Enter the data for the second level of f1, and the second level of f2.\n\n ['sat_ev_lorel', 'sat_ev_hirel','sat_no_ev_lorel', 'sat_no_ev_hirel']:'''\n subject = ds['subject']\n A = ds['conditions'] #Factor('abc', 'A', repeat=7)\n A = Factor(['ev', 'no_ev'], repeat = len(subjects)/(len(Allconditions)/2))\n\n B = Factor(['lorel', 'hirel'], tile = len(subjects)/(len(Allconditions)/2)) #4)\n ds['EvType'] = A\n ds['LowHigh'] = B\n #subjects = ['R0374', 'R0374', 'R0374', 'R0374', 'R0411', 'R0411', 'R0411', 'R0411']\n\n '''dss = []\n ds = Dataset()\n for subject in subjects:\n\n\t ds['subject'] = ds['subject'].append(Factor([subject,subject,subject,subject], name = 'subject'))\n\t ds['EvType'] = ds['EvType'].append(Factor(['ev','ev','no_ev','no_ev'], name = 'EvType'))\n\t ds['LowHigh'] = ds['LowHigh'].append(Factor(['lorel','hirel','lorel','hirel'], name = 'LowHigh'))\n\t ds['Condition'] = ds['Condition'].append(Factor(['sat_ev_lorel', 'sat_ev_hirel','sat_no_ev_lorel', 'sat_no_ev_hirel'], name ='Condition'))\n\n dss.append(ds)\n\n ds = combine((dss))'''\n a = 1\n Results = testnd.anova(y, 'EvType*LowHigh', sub=None, ds=ds, samples=10, pmin=.3, fmin=None, minsource=3, mintime=.02, tfce=False, tstart=.1, tstop=.5, match='subject')\n a= 1\n cluster_table = Results.clusters.as_table()\n\n print(cluster_table)\n #QMessageBox.about(None, 'Data extraction complete, now running the statistical test','Analysis Complete Results are displayed in the main window')\n\n\n\n\n #plot.UTSClusters(Results, title=\"Random Effects Model\")\n #Results_table = cluster_table\n\n\n return Results\n\n\n#if __name__ == '__main__':\n\n# results = ANOVA22BESASpatial('root', 'condition1Name', 'condition2Name', 'startTime', 
'endTime', 'presTim', 'PThresh',\n# 'minTemporalCluster', 'numPerm', 'anlsysTail', 'ROI')\n\n","repo_name":"esmam/MRATPython27","sub_path":"stats/ANOVAtoEelbrain.py","file_name":"ANOVAtoEelbrain.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74339850826","text":"import os\nimport json\nimport pandas as pd\n\n\ndef create_parameter_sets(base_input_folder, base_output_folder, symbols, event_clock_destinations, threshold_values,\n columns_to_resample, exclude_median_columns):\n \"\"\"\n Creates parameter sets for event clock resampling.\n\n Args:\n base_input_folder (str): Base input folder containing subfolders for each symbol.\n base_output_folder (str): Base output folder.\n symbols (List[str]): List of symbols.\n event_clock_destinations (Dict[str, str]): Dictionary mapping event clocks to their destinations.\n threshold_values (List[int]): List of threshold values for each event clock.\n columns_to_resample (List[str]): Columns to be resampled.\n exclude_median_columns (List[str]): Columns to exclude from median calculation.\n\n Returns:\n List[Dict[str, str]]: List of parameter sets.\n\n \"\"\"\n parameter_sets = []\n\n for symbol in symbols:\n input_folder = os.path.join(base_input_folder, symbol)\n output_folder = os.path.join(base_output_folder, symbol)\n\n # Collect all files in the input folder\n input_files = [os.path.join(input_folder, file) for file in os.listdir(input_folder) if file.endswith('.pkl')]\n\n for event_clock, event_clock_destination, threshold_value in zip(event_clock_destinations.keys(),\n event_clock_destinations.values(),\n threshold_values):\n for input_file in input_files:\n params = {\n 'input_file': input_file,\n 'output_folder': os.path.join(output_folder, event_clock_destination),\n 'event_clock_column': event_clock,\n 'event_clock_threshold': threshold_value,\n 'columns_to_resample': columns_to_resample,\n 'exclude_median_columns': exclude_median_columns,\n }\n parameter_sets.append(params)\n\n return parameter_sets\n\n\n# Define the parameters\nbase_input_folder = '/media/ak/Data/InterestRateFuturesData/ReconstructedLOB'\nbase_output_folder = '/media/ak/Data/InterestRateFuturesData/EventClocksFiles'\nsymbols = ['RX1','DU1', 'KE1']\nevent_clock_destinations = {\n 'NoOfTrades': 'tick',\n 'TradedVolume': 'volume',\n 'CCYTradedVolume': 'dollar',\n # Add more event clocks and their destinations if needed\n}\nthreshold_values = [1, 100, 1000] # Specify threshold values for each event clock\ncolumns_to_resample = ['time', 'BestBid', 'BestAsk', 'MicroPrice', 'arrival_rate', 'MeanRelativeTickVolume',\n 'OrderImbalance']\nexclude_median_columns = []\n\n# Create the parameter sets\nparameter_sets = create_parameter_sets(base_input_folder, base_output_folder, symbols, event_clock_destinations,\n threshold_values, columns_to_resample, exclude_median_columns)\nprint(parameter_sets)\n# Save parameter sets to a JSON file\nconfig_filepath = '/media/ak/Data/InterestRateFuturesData/EventClocksFiles/configmany.json'\nwith open(config_filepath, 'w') as f:\n json.dump(parameter_sets, f, indent=4)\n","repo_name":"andreas-koukorinis/PaperCode","sub_path":"stylised_facts/lob_for_futures/CreateJsonFileForFinalLOBVol.py","file_name":"CreateJsonFileForFinalLOBVol.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34654131706","text":"import argparse\nfrom collections 
import OrderedDict\n\nimport flwr as fl\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom contact_dataset import ContactDataset\n\nparser = argparse.ArgumentParser(description='Pytorch FedAvg')\nparser.add_argument('--identity_code', type=str, help='identity_code for dataset')\nargs = parser.parse_args()\nidentity_code = args.identity_code\n# ----------------------- LSTM -----------------------------\n# instantiate the dataset object\ntrain_data = ContactDataset(identity_code)\nprint(f\"train_data:{train_data}\")\n# feed the dataset into a DataLoader to control shuffling and batch_size\n# on Windows num_workers can only be 0; other values raise an error\ntrain_loader = DataLoader(train_data, batch_size=1, shuffle=False, num_workers=0)\ntest_loader = DataLoader(train_data, batch_size=1, shuffle=False, num_workers=0)\n\n\nINPUT_DIM = 2\nOUTPUT_DIM = 3\nHIDDEN_DIM = 6\n\n\nclass LSTMTagger(nn.Module):\n\n    def __init__(self, input_dim, hidden_dim, tagset_size):\n        super(LSTMTagger, self).__init__()\n        self.hidden_dim = hidden_dim\n\n        # The LSTM takes word embeddings as inputs, and outputs hidden states\n        # with dimensionality hidden_dim.\n        self.lstm = nn.LSTM(input_dim, hidden_dim)\n\n        # The linear layer that maps from hidden state space to tag space\n        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)\n\n    def forward(self, input):\n        print(f\"input:{input}\")\n        lstm_out, _ = self.lstm(torch.tensor(input, dtype=torch.float32))\n        print(f\"lstm_out:{lstm_out[-1]}\")\n        tag_space = self.hidden2tag(lstm_out[-1])\n        print(f\"tag_space:{tag_space}\")\n        tag_scores = F.softmax(tag_space, dim=0)\n        print(f\"tag_scores:{tag_scores}\")\n        return tag_scores\n\n\nnet = LSTMTagger(INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM)\n\n\n# See what the scores are before training\n# Note that element i,j of the output is the score for tag j for word i.\n# Here we don't need to train, so the code is wrapped in torch.no_grad()\n# with torch.no_grad():\n#     inputs = training_data[0][0]\n#     tag_scores = model(inputs)\n#     print(f\"pre_test: {tag_scores}\")\n\n\ndef train(model, train_loader, epochs):\n    \"\"\"Train the network on the training set.\"\"\"\n    loss_function = nn.MSELoss()\n    optimizer = optim.SGD(model.parameters(), lr=0.1)\n    total_loss = 0\n    # writer = SummaryWriter('./data_log/train')\n    for epoch in range(epochs):\n        loss_epoch = 0\n        for features, tags in train_loader:\n            # Step 1. Remember that Pytorch accumulates gradients.\n            # We need to clear them out before each instance\n            model.zero_grad()\n\n            # Step 2. Run our forward pass.\n            tag_scores = model(features)\n\n            # Step 3. Compute the loss, gradients, and update the parameters by\n            # calling optimizer.step()\n            loss = loss_function(tag_scores, torch.tensor(tags, dtype=torch.float32))\n            loss_epoch += loss\n            # print(\"loss:\", loss)\n            loss.backward()\n            optimizer.step()\n        # writer.add_scalar(\"loss\", loss_epoch, epoch)\n        print(f\"loss_epoch:{loss_epoch}\")\n        total_loss += loss_epoch\n    return total_loss\n\n\ndef test(model, test_loader):\n    \"\"\"Validate the network on the entire test set.\"\"\"\n    loss_function = nn.MSELoss()\n    correct, total, loss = 0, 0, 0.0\n    with torch.no_grad():\n        for features, tags in test_loader:\n            # print(f\"features:{features} - tags:{tags}\")\n            tag_scores = model(features)\n            loss += loss_function(tag_scores, torch.tensor(tags, dtype=torch.float32))\n            predicted = tag_scores\n            total += 1\n            pred_y = predicted.numpy()\n            label_y = torch.stack(tags).numpy()\n            diff = abs(np.array(pred_y - label_y))\n            if np.max(diff) <= 0.1: # a prediction counts as correct if all probability differences are at most 0.1\n                correct += 1\n            # if pred_y.all == label_y.all:\n            #     correct += 1\n            print(f\"pred_y: {pred_y}\")\n            print(f\"label_y: {label_y}\")\n            print(f\"diff: {diff}\")\n            print(f\"correct: {correct}\")\n    accuracy = correct / total\n    print(f\"accuracy: {accuracy}\")\n    return loss, accuracy\n\n\nclass ContactClient(fl.client.NumPyClient):\n\n    # def __init__(self, cid, net):\n    #     self.cid = cid\n    #     self.net = net\n\n    def get_parameters(self, config):\n        return [val.cpu().numpy() for _, val in net.state_dict().items()]\n\n    def set_parameters(self, parameters):\n        params_dict = zip(net.state_dict().keys(), parameters)\n        state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n        net.load_state_dict(state_dict, strict=True)\n\n    def fit(self, parameters, config):\n        self.set_parameters(parameters)\n        train(net, train_loader, epochs=1)\n        return self.get_parameters(config={}), len(train_loader), {}\n\n    def evaluate(self, parameters, config):\n        self.set_parameters(parameters)\n        loss, accuracy = test(net, test_loader)\n        return float(loss), len(test_loader), {\"accuracy\": float(accuracy)}\n\n\n# def client_fn(cid) -> ContactClient:\n#     # net = Net().to(DEVICE)\n#     # trainloader = trainloaders[int(cid)]\n#     # valloader = valloaders[int(cid)]\n#     return ContactClient(cid)\n\nfl.client.start_numpy_client(server_address=\"localhost:8082\", client=ContactClient())\n\n# if __name__ == '__main__':\n#     fl.client.start_numpy_client(server_address=\"[::]:8082\", client=ContactClient())\n","repo_name":"RediYo/FedAvg","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"30618808733","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math as m\n\ndef u(x):\n    return m.sin(x) + m.cos(x)\n\nh = 0.01\n\nx = np.arange(-4*m.pi, 4*m.pi, h)\ny = np.vectorize(u)(x)\n\nplt.plot(x, y)\nplt.ylabel('u(x)')\nplt.show()\n","repo_name":"manudubinsky/simymod-2018-1c","sub_path":"practicas/practica4/resoluciones/arroyo-jakulj/ej1.a_graficar_u.py","file_name":"ej1.a_graficar_u.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"10485716246","text":"\"\"\"\nDistribution List related tasks, for each type of email\n\"\"\"\n\nimport os\nimport requests\nimport datetime\n\nfrom flask import current_app\nfrom sqlalchemy import and_\n\nfrom app import db\nfrom app.common.utils import delete_fs_file\nfrom 
app.crm_resources.crm_distribution_lists.models import CRMDistributionList\nfrom app.crm_resources.crm_distribution_invitee_lists.models import (\n CRMDistributionInviteeList)\nfrom app.base import constants as APP\nfrom app.resources.email_credentials.helpers import get_smtp_settings\n\nfrom queueapp.tasks import celery_app, logger, send_email_actual\nfrom app.resources.unsubscriptions.helpers import (\n generate_unsubscribe_email_link)\nfrom app.resources.unsubscriptions.helpers import is_unsubscription\n\n\n@celery_app.task(bind=True, ignore_result=True, soft_time_limit=10000)\ndef send_distribution_invitee_list_email(\n self, result, row_id, *args, **kwargs):\n \"\"\"\n Sends the Distribution List invitees related email\n :param result:\n the result of previous task when chaining. Remember to pass True, when\n called as first of chain, or individually.\n :param row_id:\n the row id of the distribution list for which email is to be\n generated\n \"\"\"\n\n if result:\n try:\n distribution_list_data = CRMDistributionList.query.get(row_id)\n if not distribution_list_data:\n logger.exception('Distribution list does not exist')\n return False\n # get attachment files\n files = []\n if distribution_list_data.attachments:\n distribution_list_data.load_urls()\n for name, attach_url in zip(\n distribution_list_data.attachments,\n distribution_list_data.attachment_urls):\n response = requests.get(attach_url)\n file = os.path.join(\n current_app.config['BASE_DOWNLOADS_FOLDER'], name)\n with open(file, 'wb') as f:\n f.write(response.content)\n files.append(file)\n with open('email_html_docs/dist_list_template'\n '.html', 'r') as htmlfile:\n htmlfile = htmlfile.read()\n # generate the email content\n # default sender details\n from_name = current_app.config['DEFAULT_CA_SENDER_NAME']\n from_email = current_app.config['DEFAULT_CA_SENDER_EMAIL']\n # get the sender details, incase it is set in account_settings\n acc_setts = distribution_list_data.account.settings # account settings\n if (acc_setts.event_sender_email and\n acc_setts.event_sender_name and acc_setts.verified_status):\n # #TODO: always reverify, before sending\n from_name = acc_setts.event_sender_name\n from_email = acc_setts.event_sender_email\n\n reply_to = ''\n subject = current_app.config['BRAND_NAME'] + '-' + \\\n distribution_list_data.campaign_name\n attachment = files\n body = ''\n # fetch all invitee details\n distribution_invitee = CRMDistributionInviteeList.query.filter(\n and_(\n CRMDistributionInviteeList.distribution_list_id == row_id,\n CRMDistributionInviteeList.email_status == \\\n APP.EMAIL_NOT_SENT)).all()\n if distribution_invitee:\n html_teplate = distribution_list_data.html_template\n dynamic_body = {'body': html_teplate}\n for invitee in distribution_invitee:\n is_unsub = is_unsubscription(invitee.invitee_email,\n APP.EVNT_DIST_LIST, invitee)\n if not is_unsub:\n unsub_url = generate_unsubscribe_email_link(\n invitee.invitee_email)\n dynamic_body['unsubscribe'] = unsub_url\n html = htmlfile.format(**dynamic_body)\n smtp_settings = get_smtp_settings(\n distribution_list_data.created_by)\n send_email_actual(\n subject=subject, from_name=from_name,\n keywords=APP.DISTRIBUTION_EMAIL_TASK,\n from_email=from_email,\n to_addresses=[invitee.invitee_email],\n reply_to=reply_to, body=body, html=html,\n attachment=attachment, smtp_settings=smtp_settings)\n # for mail sent or not to invitee\n invitee.email_status = APP.EMAIL_SENT\n invitee.sent_on = datetime.datetime.utcnow()\n invitee.is_mail_sent = True\n 
db.session.add(invitee)\n                        db.session.commit()\n            if attachment:\n                for attach in attachment:\n                    delete_fs_file(attach)\n            result = True\n        except Exception as e:\n            logger.exception(e)\n            raise e\n\n    return result\n","repo_name":"Witzcode0/Exchange-connect","sub_path":"queueapp/crm_distribution_lists/email_tasks.py","file_name":"email_tasks.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"14790542161","text":"\"\"\"\nHandling model.\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\n\nclass Model:\n    \"\"\"\n    Base class that each model extends.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Sets up model.\n        \n        \"\"\"\n        raise NotImplementedError()\n\n    def classify(self):\n        \"\"\"\n        Returns final activation for each class.\n\n        Returns\n        -------\n        Tensor\n            Tensor with activation for each class.\n\n        \"\"\"\n        return tf.reduce_max(self.output, axis=0)\n\n    def create_feed_dict(self, input_image):\n        \"\"\"\n        Creates and returns feed dict for model with input image.\n\n        Returns\n        -------\n        dict\n            Feed dict with image\n\n        \"\"\"\n        input_name = (\n            self.input_name\n            if \":\" in self.input_name\n            else \"{}:0\".format(self.input_name)\n        )\n        input_image_expanded = np.expand_dims(input_image, axis=0)\n        feed_dict = {input_name: input_image_expanded}\n        return feed_dict\n\n    def find_layer_tensor(self, layer_name):\n        \"\"\"\n        Finds layer tensor in model by name.\n\n        Parameters\n        ----------\n        layer_name : str\n            Name of the layer.\n\n        Raises\n        ------\n        ValueError\n            If layer_name is invalid.\n\n        Returns\n        -------\n        Tensor\n            Tensor that represents layer.\n\n        \"\"\"\n        if self.input_name == layer_name:\n            return self.input\n        if self.output_name == layer_name:\n            return self.output\n\n        layer_id = self.conv_layers_names.index(layer_name)\n        return self.conv_layers[layer_id]\n\n    def find_neuron_tensor(self, layer_name, neuron_num):\n        \"\"\"\n        Finds tensor that represents neuron in model.\n\n        Parameters\n        ----------\n        layer_name : str\n            Name of the layer.\n        neuron_num : int\n            Neuron's number in layer.\n\n        Raises\n        ------\n        ValueError\n            If layer_name is invalid.\n\n        Returns\n        -------\n        Tensor\n            Tensor that represents neuron.\n\n        \"\"\"\n        layer = self.find_layer_tensor(layer_name)\n\n        neuron_begin = np.zeros(len(layer.shape), dtype=np.int32)\n        neuron_begin[-1] = neuron_num\n\n        neuron_size = -np.ones(len(layer.shape), dtype=np.int32)\n        neuron_size[-1] = 1\n\n        return tf.slice(layer, begin=neuron_begin, size=neuron_size)\n\n    def start_session(self):\n        \"\"\"\n        Starts session.\n\n        Returns\n        -------\n        session : Tensorflow Session\n            Started session.\n\n        \"\"\"\n        session = tf.compat.v1.InteractiveSession(graph=self.graph)\n        init = tf.compat.v1.global_variables_initializer()\n        session.run(init)\n        return session\n","repo_name":"mproszewska/internn","sub_path":"internn/models/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"70274150344","text":"from django.shortcuts import get_object_or_404\r\nfrom rest_framework.decorators import action\r\nfrom rest_framework import permissions, viewsets\r\nfrom rest_framework.response import Response\r\nfrom rest_framework_extensions.mixins import NestedViewSetMixin\r\n\r\nfrom datetime import datetime as date\r\n\r\nfrom contest.models import (\r\n\tFaq,\r\n\tTicket\r\n)\r\nfrom contest.serializers import 
(\r\n\tFaqSerializer,\r\n\tTicketSerializer,\r\n)\r\n\r\nfrom contest.emailagent import send_simple_email\r\n\r\nimport pdb\r\n\r\nclass FaqViewSet(NestedViewSetMixin, viewsets.ModelViewSet):\r\n\t\"\"\"\r\n\tAPI endpoint that allows entries to be viewed or edited.\r\n\t\"\"\"\r\n\tqueryset = Faq.objects.all()\r\n\tserializer_class = FaqSerializer\r\n\tpermission_classes = [permissions.AllowAny]\r\n\t# permission_classes = [permissions.IsAuthenticated]\r\n\r\n\t@action(methods=['post'], detail=False)\r\n\tdef submit_ticket(self, request, **kwarg):\r\n\t\tstatus = 200\r\n\t\tmessage = 'Successfully Sent'\r\n\t\ttry:\r\n\t\t\tdata = request.data\r\n\t\t\tusername = ''\r\n\t\t\tif request.user:\r\n\t\t\t\tdata['creator'] = request.user.id\r\n\t\t\t\tusername = request.user.username\r\n\t\t\tdata['status'] = send_simple_email(data['title'], data['message'], username)\r\n\t\t\tdata['delivered'] = date.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n\t\t\tticket_serializer = TicketSerializer(data=data)\r\n\t\t\tif ticket_serializer.is_valid():\r\n\t\t\t\tnew_ticket = ticket_serializer.save()\r\n\r\n\t\texcept Exception as err:\r\n\t\t\tprint(err)\r\n\t\t\tstatus = 500\r\n\t\t\tmessage = 'Something went wrong.'\r\n\r\n\t\treturn Response(dict(message=message, status=status), status)\r\n\r\n","repo_name":"idev-a/ufc-stats","sub_path":"fighter/contest/views/faq_views.py","file_name":"faq_views.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28066076919","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport sys\r\n\r\nfile_dir = os.path.dirname(os.path.abspath(__file__))\r\nsys.path.append(os.path.join(file_dir, \"../\"))\r\n\r\nfrom knapsack_01 import knapsack_01\r\n\r\n\r\ndef test_knapsack_01():\r\n subset = knapsack_01(\r\n items=[\"A\", \"B\", \"C\", \"D\"],\r\n values=[2.2, 4, 2, 3],\r\n weights=[2, 3, 3, 5],\r\n capacity=9,\r\n )\r\n optimal = ([\"A\", \"B\", \"C\"], 8.2)\r\n\r\n assert set(subset[0]) == set(optimal[0])\r\n assert subset[1] == optimal[1]\r\n","repo_name":"antoniojkim/AlgLib","sub_path":"Algorithms/Dynamic Programming/01 Knapsack/tests/test_01_knapsack.py","file_name":"test_01_knapsack.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35172548507","text":"from codemanip.code_utils import join_string_by_pipe_char\n\nfrom litgen.options import LitgenOptions\n\nimport sys\nimport os\n\nTHIS_DIR = os.path.dirname(__file__)\nsys.path.append(THIS_DIR + \"/../../imgui/bindings\")\nfrom litgen_options_imgui import litgen_options_imgui, ImguiOptionsType # noqa: E402\n\n\ndef litgen_options_implot() -> LitgenOptions:\n options = litgen_options_imgui(ImguiOptionsType.imgui_h, docking_branch=True)\n options.namespace_root__regex = \"^ImPlot$\"\n options.srcmlcpp_options.functions_api_prefixes = \"IMPLOT_API|IMPLOT_TMP\"\n\n options.fn_force_overload__regex = \"BeginPlot\"\n options.fn_force_lambda__regex = join_string_by_pipe_char([\"^Contains$\"])\n\n options.fn_exclude_by_param_type__regex = \"ImPlotFormatter|ImPlotTransform\"\n\n options.function_names_replacements.add_first_replacement(\"ImGui\", \"Imgui\")\n options.type_replacements.add_first_replacement(\"ImGuiContext\", \"ImGui_Context\")\n\n options.type_replacements.add_last_replacement(r\"ImPlot([A-Z][a-zA-Z0-9]*)\", r\"\\1\")\n\n options.fn_params_buffer_types = join_string_by_pipe_char(\n [\n # // Scalar data types 
defined by imgui.h\n # // typedef unsigned int ImGuiID;// A unique ID used by widgets (typically the result of hashing a stack of string)\n # // typedef signed char ImS8; // 8-bit signed integer\n # // typedef unsigned char ImU8; // 8-bit unsigned integer\n # // typedef signed short ImS16; // 16-bit signed integer\n # // typedef unsigned short ImU16; // 16-bit unsigned integer\n # // typedef signed int ImS32; // 32-bit signed integer == int\n # // typedef unsigned int ImU32; // 32-bit unsigned integer (often used to store packed colors)\n # // typedef signed long long ImS64; // 64-bit signed integer\n # // typedef unsigned long long ImU64; // 64-bit unsigned integer\n \"uint8_t\",\n \"int8_t\",\n \"uint16_t\",\n \"int16_t\",\n \"uint32_t\",\n \"int32_t\",\n \"np_uint_l\", # Platform dependent: \"uint64_t\" on *nixes, \"uint32_t\" on windows\n \"np_int_l\", # Platform dependent: \"int64_t\" on *nixes, \"int32_t\" on windows\n \"float\",\n \"double\",\n \"long double\", # Note: long double not supported in implot (yet?)\n \"long long\",\n ]\n )\n\n options.fn_exclude_by_name__regex = join_string_by_pipe_char(\n [\n # Legitimate Excludes\n # Exclude functions whose name end with G, like for example\n # IMPLOT_API void PlotLineG(const char* label_id, ImPlotGetter getter, void* data, int count);\n # which are made for specialized C/C++ getters\n r\"\\w*G\\Z\",\n # Exclude function whose name ends with V, like for example\n # IMPLOT_API void TagXV(double x, const ImVec4& color, const char* fmt, va_list args) IM_FMTLIST(3);\n # which are utilities for variadic print format\n r\"\\w*V\\Z\",\n # Excludes due to two-dimensional buffer\n # PlotHeatmap(.., const T* values, int rows, int cols, !!!\n # ^ ^ ^\n \"PlotHeatmap\",\n # Excludes due to antique style string vectors\n # for which there is no generalizable parse\n # void SetupAxisTicks(ImAxis idx, const double* values, int n_ticks, const char* const labels[], bool show_default)\n # ^ ^\n \"SetupAxisTicks\",\n # IMPLOT_API ImPlotColormap AddColormap(const char* name, const ImU32* cols, int size, bool qual=true);\n # (This API is a bit exotic, and cannot be bound automatically)\n \"^AddColormap$\",\n ]\n )\n\n return options\n","repo_name":"pthom/imgui_bundle","sub_path":"external/implot/bindings/litgen_options_implot.py","file_name":"litgen_options_implot.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","stars":394,"dataset":"github-code","pt":"81"} +{"seq_id":"29813753691","text":"from flask import Flask, request, jsonify\nimport predict\n\napp = Flask(__name__)\n\n@app.route('/predict', methods=['POST'])\ndef run():\n data = request.get_json(force=True)\n input_dt = data['input']\n result = predict.predict(input_dt)\n return jsonify({'prediction': result})\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080)\n","repo_name":"sourabh-burnwal/eks_training","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40129411330","text":"#!/usr/bin/env python\n# Encoding: iso-8859-1\n# vim: tw=80 ts=4 sw=4 noet\n# -----------------------------------------------------------------------------\n# Project : Retro - HTTP Toolkit\n# -----------------------------------------------------------------------------\n# Author : Sebastien Pierre \n# License : Revised BSD License\n# -----------------------------------------------------------------------------\n# 
Creation : 27-Jul-2006\n# Last mod : 27-Jul-2008\n# -----------------------------------------------------------------------------\n\nimport sys\nimport webbrowser\nfrom os.path import abspath, dirname\nfrom retro import *\n\n__doc__ = \"\"\"\nThe 'record' module provides the 'Record' component that simply prints (and\noptionally records) the incoming requests.\n\"\"\"\n\n# ------------------------------------------------------------------------------\n#\n# RECORD\n#\n# ------------------------------------------------------------------------------\n\n\nclass Record(Component):\n    \"\"\"Records the requests made to the given URL.\"\"\"\n\n    def __init__(self, prefix=\"/record\"):\n        Component.__init__(self, name=\"Record\")\n        self.PREFIX = prefix\n        self.out = sys.stdout\n\n    def log(self, data):\n        sys.stdout.write(data)\n\n    @on(GET=\"\")\n    @on(GET=\"/{rest}\")\n    def catchAll(self, request, rest=None):\n        self.log(\"----8<---- START REQUEST ----------\\n\")\n        self.log(request.environ(\"extra.request\"))\n        self.log(\"\".join(request.environ(\"extra.headers\")))\n        if request.data():\n            self.log(request.data())\n        self.log(\"----8<---- END REQUEST ----------\\n\")\n        return request.respond(\"OK\")\n\n# ------------------------------------------------------------------------------\n#\n# MAIN\n#\n# ------------------------------------------------------------------------------\n\n\ndef run(args):\n    if type(args) not in (type([]), type(())):\n        args = [args]\n    from optparse import OptionParser\n    # We create the parser and register the options\n    oparser = OptionParser(version=\"Retro[+record]\")\n    oparser.add_option(\"-p\", \"--port\", action=\"store\", dest=\"port\",\n                       help=OPT_PORT, default=\"8000\")\n    oparser.add_option(\"-f\", \"--files\", action=\"store_true\", dest=\"files\",\n                       help=\"Serve local files\", default=None)\n    # We parse the options and arguments\n    options, args = oparser.parse_args(args=args)\n    app = Application(components=[Record()])\n    import retro\n    return retro.run(app=app, sessions=False, port=int(options.port))\n\n# -----------------------------------------------------------------------------\n#\n# Main\n#\n# -----------------------------------------------------------------------------\n\n\nif __name__ == \"__main__\":\n    run(sys.argv[1:])\n\n# EOF\n","repo_name":"sebastien/retro","sub_path":"src/py/retro/contrib/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"}
{"seq_id":"75018719625","text":"# 3rd approach: a queue that shifts its elements back when no space is left; requires tracking both front and rear\nclass ArrQueue:\n    def __init__(self, size):\n        self.size = size\n        self.arr = [None]*self.size\n        self.front = 0\n        self.rear = 0\n    def is_full(self):\n        return self.rear - self.front == self.size\n    def is_empty(self):\n        return self.rear == self.front\n    def shift(self):\n        # move the occupied slots back to the start of the array\n        for i in range(0, (self.rear - self.front)):\n            self.arr[i] = self.arr[self.front + i]\n        val = self.front\n        self.front = 0\n        self.rear -= val\n    def push(self, val):\n        if not self.is_full():\n            self.arr[self.rear] = val\n            self.rear += 1\n            if self.rear == self.size and self.front != 0:\n                self.shift()\n        else:\n            raise Exception(\"List Full\")\n    def remove(self):\n        if not self.is_empty():\n            self.front += 1\n            if self.rear == self.size and self.front != 0:\n                self.shift()\n        else:\n            raise Exception(\"Empty List\")\n    def display(self):\n        for i in range(self.front, self.rear):\n            print(self.arr[i], end=\" \")\n        print()\ndef main():\n    a = ArrQueue(4)\n    a.push(90)\n    a.remove()\n    a.push(89)\n    a.push(67)\n    a.push(98)\n    a.push(100)\n    a.display()\n    a.remove()\n    a.remove()\n    a.display()\n    a.push(10000)\n    a.display()\n    a.remove()\n    a.display()\n    a.push(98)\n    a.display()\nmain()","repo_name":"FaiqImran123/Queue_ArrAY","sub_path":"arr_queue2.py","file_name":"arr_queue2.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"43340773769","text":"def prime(n):\n    if n==1:\n        return 0\n    for i in range(2,int(n**0.5)+1,1):\n        if n%i==0:\n            return 0\n    return 1\nx=int(input())\ny=x+1\nwhile True:\n    if prime(y):\n        n=str(y)\n        a=n[::-1]\n        if n==a:\n            print(n)\n            break\n    y+=1\n","repo_name":"Sivamedisetti/codemind-python","sub_path":"Program_to_find_out_the_next_prime_palindrome_number.py","file_name":"Program_to_find_out_the_next_prime_palindrome_number.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17620712680","text":"import os\nimport math\nfrom time import time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom base.BaseRecommender import BaseRecommender\nfrom dataloader.DataBatcher import DataBatcher\nfrom utils import Tool\n\n\nclass MultVAE(BaseRecommender):\n    def __init__(self, dataset, model_conf, device):\n        super(MultVAE, self).__init__(dataset, model_conf)\n        self.dataset = dataset\n        self.num_users = dataset.num_users\n        self.num_items = dataset.num_items\n\n        self.enc_dims = [self.num_items] + model_conf['enc_dims']\n        self.dec_dims = self.enc_dims[::-1]\n        self.dims = self.enc_dims + self.dec_dims[1:]\n\n        self.total_anneal_steps = model_conf['total_anneal_steps']\n        self.anneal_cap = model_conf['anneal_cap']\n\n        self.dropout = model_conf['dropout']\n        self.reg = model_conf['reg']\n\n        self.batch_size = model_conf['batch_size']\n        self.test_batch_size = model_conf['test_batch_size']\n\n        self.lr = model_conf['lr']\n        self.eps = 1e-6\n        self.anneal = 0.\n        self.update_count = 0\n\n        self.device = device\n        self.best_params = None\n\n        self.build_graph()\n\n    def build_graph(self):\n        self.encoder = nn.ModuleList()\n        for i, (d_in, d_out) in enumerate(zip(self.enc_dims[:-1], self.enc_dims[1:])):\n            if i == len(self.enc_dims[:-1]) - 1:\n                d_out *= 2\n            self.encoder.append(nn.Linear(d_in, d_out))\n            if i != len(self.enc_dims[:-1]) - 1:\n                self.encoder.append(nn.Tanh())\n\n        self.decoder = nn.ModuleList()\n        for i, (d_in, d_out) in enumerate(zip(self.dec_dims[:-1], self.dec_dims[1:])):\n            self.decoder.append(nn.Linear(d_in, d_out))\n            if i != len(self.dec_dims[:-1]) - 1:\n                self.decoder.append(nn.Tanh())\n\n        # optimizer\n        self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.reg)\n\n        # Send model to device (cpu or gpu)\n        self.to(self.device)\n\n    def forward(self, x):\n        # encoder\n        h = F.dropout(F.normalize(x), p=self.dropout, training=self.training)\n        for layer in self.encoder:\n            h = layer(h)\n\n        # sample\n        mu_q = h[:, :self.enc_dims[-1]]\n        logvar_q = h[:, self.enc_dims[-1]:] # log sigma^2, batch x 200\n        std_q = torch.exp(0.5 * logvar_q) # sigma, batch x 200\n\n        # F.kl_div()\n\n        epsilon = torch.zeros_like(std_q).normal_(mean=0, std=0.01)\n        sampled_z = mu_q + self.training * epsilon * std_q\n\n        output = sampled_z\n        for layer in self.decoder:\n            output = layer(output)\n\n        if self.training:\n            kl_loss = ((0.5 *
 (-logvar_q + torch.exp(logvar_q) + torch.pow(mu_q, 2) - 1)).sum(1)).mean()\n            return output, kl_loss\n        else:\n            return output\n\n    def train_model(self, dataset, evaluator, early_stop, logger, config):\n        exp_config = config['Experiment']\n\n        num_epochs = exp_config['num_epochs']\n        print_step = exp_config['print_step']\n        test_step = exp_config['test_step']\n        test_from = exp_config['test_from']\n        verbose = exp_config['verbose']\n        log_dir = logger.log_dir\n\n        # prepare dataset\n        # dataset.set_eval_data('valid')\n        users = np.arange(self.num_users)\n        \n        train_matrix = dataset.train_matrix.toarray()\n        train_matrix = torch.FloatTensor(train_matrix)\n\n        # for epoch\n        start = time()\n        for epoch in range(1, num_epochs + 1):\n            self.train()\n\n            epoch_loss = 0.0\n            batch_loader = DataBatcher(users, batch_size=self.batch_size, drop_remain=False, shuffle=False)\n            num_batches = len(batch_loader)\n            # ======================== Train\n            epoch_train_start = time()\n            for b, batch_idx in enumerate(batch_loader):\n                batch_matrix = train_matrix[batch_idx].to(self.device)\n\n                if self.total_anneal_steps > 0:\n                    self.anneal = min(self.anneal_cap, 1. * self.update_count / self.total_anneal_steps)\n                else:\n                    self.anneal = self.anneal_cap\n\n                batch_loss = self.train_model_per_batch(batch_matrix)\n                epoch_loss += batch_loss\n\n                if verbose and (b + 1) % verbose == 0:\n                    print('batch %d / %d loss = %.4f' % (b + 1, num_batches, batch_loss))\n            epoch_train_time = time() - epoch_train_start\n\n            epoch_info = ['epoch=%3d' % epoch, 'loss=%.3f' % epoch_loss, 'train time=%.2f' % epoch_train_time]\n\n            # ======================== Evaluate\n            if (epoch >= test_from and epoch % test_step == 0) or epoch == num_epochs:\n                self.eval()\n                # evaluate model\n                epoch_eval_start = time()\n\n                test_score = evaluator.evaluate(self)\n                test_score_str = ['%s=%.4f' % (k, test_score[k]) for k in test_score]\n\n                updated, should_stop = early_stop.step(test_score, epoch)\n\n                if should_stop:\n                    logger.info('Early stop triggered.')\n                    break\n                else:\n                    # save best parameters\n                    if updated:\n                        torch.save(self.state_dict(), os.path.join(log_dir, 'best_model.p'))\n                        if self.anneal_cap == 1: print(self.anneal)\n\n                epoch_eval_time = time() - epoch_eval_start\n                epoch_time = epoch_train_time + epoch_eval_time\n\n                epoch_info += ['epoch time=%.2f (%.2f + %.2f)' % (epoch_time, epoch_train_time, epoch_eval_time)]\n                epoch_info += test_score_str\n            else:\n                epoch_info += ['epoch time=%.2f (%.2f + 0.00)' % (epoch_train_time, epoch_train_time)]\n\n            if epoch % print_step == 0:\n                logger.info(', '.join(epoch_info))\n\n        total_train_time = time() - start\n\n        return early_stop.best_score, total_train_time\n\n    def train_model_per_batch(self, batch_matrix, batch_weight=None):\n        # zero grad\n        self.optimizer.zero_grad()\n\n        # model forward\n        output, kl_loss = self.forward(batch_matrix)\n\n        # loss \n        # ce_loss = -(F.log_softmax(output, 1) * batch_matrix).mean()\n        if batch_weight is None:\n            ce_loss = -(F.log_softmax(output, 1) * batch_matrix).sum(1).mean()\n        else:\n            ce_loss = -((F.log_softmax(output, 1) * batch_matrix) * batch_weight.view(output.shape[0], -1)).sum(1).mean()\n\n        loss = ce_loss + kl_loss * self.anneal\n\n        # backward\n        loss.backward()\n\n        # step\n        self.optimizer.step()\n\n        self.update_count += 1\n\n        return loss\n\n    def predict(self, user_ids, eval_pos_matrix, eval_items=None):\n        self.eval()\n        batch_eval_pos = eval_pos_matrix[user_ids]\n        with torch.no_grad():\n            eval_input = torch.Tensor(batch_eval_pos.toarray()).to(self.device)\n            eval_output = self.forward(eval_input).detach().cpu().numpy()\n
\n if eval_items is not None:\n eval_output[np.logical_not(eval_items)]=float('-inf')\n else:\n eval_output[batch_eval_pos.nonzero()] = float('-inf')\n self.train()\n return eval_output\n\n def restore(self, log_dir):\n with open(os.path.join(log_dir, 'best_model.p'), 'rb') as f:\n state_dict = torch.load(f)\n self.load_state_dict(state_dict)\n\n def user_embedding(self, input_matrix):\n with torch.no_grad():\n user_embedding = torch.zeros(self.num_users, self.enc_dims[-1])\n users = np.arange(self.num_users)\n\n input_matrix = torch.FloatTensor(input_matrix.toarray())\n\n batch_size = self.test_batch_size\n batch_loader = DataBatcher(users, batch_size=batch_size, drop_remain=False, shuffle=False)\n for b, (batch_user_idx) in enumerate(batch_loader):\n batch_matrix = input_matrix[batch_user_idx]\n batch_matrix = torch.Tensor(batch_matrix).to(self.device)\n\n h = F.dropout(F.normalize(batch_matrix), p=self.dropout, training=self.training)\n for layer in self.encoder:\n h = layer(h)\n batch_emb = h[:, :self.enc_dims[-1]] # mu\n\n user_embedding[batch_user_idx] += batch_emb.detach().cpu()\n\n return user_embedding.detach().cpu().numpy()\n\n def get_output(self, dataset):\n test_eval_pos, test_eval_target, _ = dataset.test_data()\n num_users = len(test_eval_target)\n num_items = test_eval_pos.shape[1]\n eval_users = np.arange(num_users)\n user_iterator = DataBatcher(eval_users, batch_size=1024)\n output = np.zeros((num_users, num_items))\n for batch_user_ids in user_iterator:\n batch_pred = self.predict(batch_user_ids, test_eval_pos)\n output[batch_user_ids] += batch_pred\n return output\n","repo_name":"jin530/LOCA","sub_path":"model/MultVAE.py","file_name":"MultVAE.py","file_ext":"py","file_size_in_byte":9116,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"81"} +{"seq_id":"41712304259","text":"from django.contrib.auth import logout\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.views import View\nfrom django.views.generic import ListView, CreateView\nfrom django.contrib.auth.views import LoginView\nfrom django.core.paginator import Paginator, PageNotAnInteger\n\nfrom .models import Profile, Comment\nfrom .forms import LoginUserForm, RegisterUserForm, AddCommentForm\n\n\nclass RegisterUser(CreateView):\n form_class = RegisterUserForm\n template_name = 'comments/register.html'\n success_url = reverse_lazy('main')\n\n\nclass UserLoginView(LoginView):\n form_class = LoginUserForm\n template_name = 'comments/login.html'\n\n def get_success_url(self):\n return reverse_lazy('main')\n\n\ndef logout_user(request):\n logout(request)\n return redirect('main')\n\n\nclass CommentBase(View):\n http_method_names = [\"get\", \"post\"]\n\n def get(self, request, *args, **kwargs):\n sort_by = self.request.GET.get('sort_by', '-created_time')\n object_list2 = Comment.objects.filter(parent=None).prefetch_related('replies__replies').order_by(sort_by)\n\n paginator = Paginator(object_list2, 15)\n page = request.GET.get('page')\n object_list = paginator.get_page(page)\n\n context = {\n 'object_list': object_list,\n }\n if request.user.is_authenticated:\n context['form'] = AddCommentForm\n return render(request, 'comments/index.html', context=context)\n\n def post(self, request, *args, **kwargs):\n sort_by = self.request.GET.get('sort_by', '-created_time')\n object_list = Comment.objects.filter(parent=None).prefetch_related('replies__replies').order_by(sort_by)\n\n paginator = Paginator(object_list, 15)\n page = 
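The `predict()` method above masks already-seen items (or items outside an explicit candidate set) with `-inf` before evaluation, so ranking metrics can never credit the model for recommending training items. A tiny standalone illustration of the same masking idiom:

```python
import numpy as np

scores = np.array([[0.9, 0.2, 0.5],
                   [0.1, 0.8, 0.3]])
seen = np.array([[True, False, False],
                 [False, True, False]])
scores[seen] = float('-inf')              # seen items can never be ranked
top1 = np.argsort(-scores, axis=1)[:, 0]  # -> array([2, 2])
```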
request.GET.get('page')\n object_list = paginator.get_page(page)\n\n context = {\n 'object_list': object_list,\n }\n if request.user.is_authenticated:\n form = AddCommentForm(request.POST, request.FILES)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.save()\n else:\n form = AddCommentForm\n context['form'] = form\n return render(request, 'comments/index.html', context=context)\n","repo_name":"Zaza2215/SPA-comments","sub_path":"SPA/comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15255750759","text":"import psycopg2\n\n\nclass BDCreate:\n\n def __init__(self, database_name: str, params: dict):\n self.database_name = database_name\n self.params = params\n\n def create_database(self):\n \"\"\"Создание базы данных и таблиц для сохранения данных о каналах и видео.\"\"\"\n\n conn = psycopg2.connect(dbname='postgres', **self.params)\n conn.autocommit = True\n cur = conn.cursor()\n\n try:\n cur.execute(f'DROP DATABASE {self.database_name}')\n except:\n pass\n finally:\n cur.execute(f'CREATE DATABASE {self.database_name}')\n\n conn.close()\n\n conn = psycopg2.connect(dbname=self.database_name, **self.params)\n\n # with conn.cursor() as cur:\n # cur.execute(\"\"\"\n # CREATE TABLE employers (\n # employer_id SERIAL PRIMARY KEY,\n # employer VARCHAR\n # )\n # \"\"\")\n\n with conn.cursor() as cur:\n cur.execute(\"\"\"\n CREATE TABLE vacancies (\n vacancies_id SERIAL PRIMARY KEY,\n employer VARCHAR,\n vacancy VARCHAR NOT NULL,\n vacancy_url TEXT,\n city VARCHAR,\n salary INTEGER,\n currency VARCHAR\n )\n \"\"\")\n\n conn.commit()\n conn.close()\n\n # params = config()\n # bd = BDManager('coursework5', params)\n # bd.create_database()\n\n def save_data_to_database(self, data: list[dict]):\n \"\"\"Сохранение данных о каналах и видео в базу данных.\"\"\"\n\n conn = psycopg2.connect(dbname=self.database_name, **self.params)\n\n with conn.cursor() as cur:\n for i in range(len(data)):\n vacancy_data = data[i]\n if vacancy_data['salary'] is None:\n continue\n if vacancy_data['salary']['to'] is None:\n continue\n else:\n name_employer = vacancy_data['employer']['name']\n name_vacancy = vacancy_data['name']\n city = vacancy_data['area']['name']\n url = vacancy_data['alternate_url']\n salary_to = vacancy_data['salary']['to']\n currency = vacancy_data['salary']['currency']\n # cur.execute(\n # \"\"\"\n # INSERT INTO employers (employer)\n # VALUES (%s)\n # \"\"\",\n # name_employer\n # )\n # employer_id = cur.fetchone()[0]\n cur.execute(\n \"\"\"\n INSERT INTO vacancies (vacancy, employer, vacancy_url, city, salary, currency)\n VALUES (%s, %s, %s, %s, %s, %s)\n \"\"\",\n (name_vacancy, name_employer, url, city, salary_to, currency)\n )\n\n conn.commit()\n conn.close()","repo_name":"KirillGlu/coursework-5","sub_path":"classes/BDCreate.py","file_name":"BDCreate.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9799352155","text":"\nfrom random import randint\nfrom threading import Thread\nfrom time import time, sleep\n\n\nclass DownloadTask(Thread):\n def __init__(self, filename):\n super().__init__()\n self._filename = filename\n\n def run(self):\n print('Start downloading %s...' % self._filename)\n time_to_download = randint(3, 5)\n sleep(time_to_download)\n print('%s download completed! 
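The DROP/CREATE sequence in the `BDCreate` record above swallows every exception with a bare `except:`. A hedged alternative (a sketch, not the project's code) queries `pg_database` first; note that data values use bound `%s` parameters, but a database name is an identifier and cannot be bound, hence the f-string as in the original:

```python
import psycopg2

def recreate_database(name: str, params: dict):
    conn = psycopg2.connect(dbname='postgres', **params)
    conn.autocommit = True  # CREATE/DROP DATABASE cannot run inside a transaction
    with conn.cursor() as cur:
        cur.execute("SELECT 1 FROM pg_database WHERE datname = %s", (name,))
        if cur.fetchone():
            cur.execute(f'DROP DATABASE {name}')
        cur.execute(f'CREATE DATABASE {name}')
    conn.close()
```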
Cost %ss' % (self._filename, time_to_download))\n\n\ndef main():\n start = time()\n t1 = DownloadTask('Python_Learn.pdf')\n t1.start()\n\n t2 = DownloadTask('Shenzhen Hot.mkv')\n t2.start()\n\n # 阻塞线程,等待执行完成\n t1.join()\n t2.join()\n\n end = time()\n print('Total cost %.2fs' % (end - start))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hefengxian/python-playground","sub_path":"learn/process_thread/py_thread01.py","file_name":"py_thread01.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34051703424","text":"from flask import Flask, request, jsonify, render_template\r\nfrom flask_cors import CORS\r\n\r\nimport torch\r\nfrom torchvision.transforms import ToTensor\r\nfrom transformers import ViTModel, ViTFeatureExtractor\r\nfrom transformers.modeling_outputs import SequenceClassifierOutput\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nimport requests\r\nimport numpy\r\nfrom PIL import Image\r\nfrom torchvision.transforms import ToTensor\r\nimport io\r\nfrom io import BytesIO\r\nimport base64\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n return render_template('index.html')\r\n\r\n@app.route('/analyze', methods=['POST'])\r\ndef analyze_image():\r\n # Extract the image from either the URL or the uploaded file\r\n if 'file' in request.files:\r\n # The user uploaded a file\r\n image_file = request.files['file']\r\n image = Image.open(image_file.stream)\r\n image = preprocess_image(image)\r\n encoded_image = None # No need to send the uploaded image back\r\n elif 'url' in request.form:\r\n try:\r\n # The user provided a URL\r\n image_url = request.form['url']\r\n image = download_and_preprocess_image(image_url)\r\n\r\n # Encode the image in base64\r\n buffered = BytesIO(requests.get(image_url).content)\r\n encoded_image = base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\r\n except ValueError as e:\r\n # The URL is invalid\r\n return jsonify(error=str(e) + \"\\n\\nTry downloading the image and resubmitting\"), 400\r\n else:\r\n # No valid input provided\r\n return jsonify(error='No file or URL provided'), 400\r\n \r\n # Predict the image\r\n probabilities = predict_image(image, model, feature_extractor)\r\n\r\n ai_prob = float(probabilities[0][0])\r\n human_prob = float(probabilities[0][1])\r\n return jsonify({\r\n \"human\": human_prob,\r\n \"ai\": ai_prob,\r\n \"image\": f\"data:image/jpeg;base64,{encoded_image}\" if encoded_image else None\r\n })\r\n\r\nif __name__ == '__main__':\r\n\r\n # Define the model\r\n class ViTForImageClassification(nn.Module):\r\n def __init__(self, num_labels=2):\r\n super(ViTForImageClassification, self).__init__()\r\n self.vit = ViTModel.from_pretrained('google/vit-large-patch32-384')\r\n self.dropout = nn.Dropout(0.1)\r\n self.classifier = nn.Linear(self.vit.config.hidden_size, num_labels)\r\n self.num_labels = num_labels\r\n\r\n def forward(self, pixel_values, labels=None):\r\n outputs = self.vit(pixel_values=pixel_values)\r\n output = self.dropout(outputs.last_hidden_state[:,0])\r\n logits = self.classifier(output)\r\n \r\n if labels is not None:\r\n loss_fct = nn.CrossEntropyLoss()\r\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\r\n return SequenceClassifierOutput(\r\n loss=loss,\r\n logits=logits,\r\n )\r\n else:\r\n return logits\r\n \r\n # Initialize model and feature extractor\r\n device = torch.device('cuda' if torch.cuda.is_available() 
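A hypothetical client for the `/analyze` endpoint defined in the Flask record above (host and port are assumptions based on Flask's `app.run` defaults; `sample.jpg` is a placeholder file):

```python
import requests

with open('sample.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/analyze', files={'file': f})
resp.raise_for_status()
result = resp.json()
print(f"AI: {result['ai']:.2%}, human: {result['human']:.2%}")
```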
else 'cpu')\r\n feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-large-patch32-384')\r\n model_path = \"./app/static/AID96k_E15_384.pth\"\r\n model = ViTForImageClassification(num_labels=2)\r\n model.load_state_dict(torch.load(model_path))\r\n model.to(device)\r\n model.eval()\r\n\r\n def download_and_preprocess_image(image_url, desired_size=384):\r\n # Send a GET request to the image URL\r\n response = requests.get(image_url)\r\n if response.status_code != 200:\r\n raise ValueError(\"Failed to download the image.\")\r\n\r\n # Convert bytes to a PIL Image object\r\n im = Image.open(io.BytesIO(response.content))\r\n\r\n # Resize and pad the image\r\n old_size = im.size\r\n ratio = float(desired_size) / max(old_size)\r\n new_size = tuple([int(x*ratio) for x in old_size])\r\n im = im.resize(new_size, Image.ANTIALIAS)\r\n\r\n # Create a new image and paste the resized on it\r\n new_im = Image.new(\"RGB\", (desired_size, desired_size), \"white\")\r\n new_im.paste(im, ((desired_size-new_size[0])//2,\r\n (desired_size-new_size[1])//2))\r\n\r\n return new_im\r\n\r\n def preprocess_image(image, desired_size=384):\r\n im = image\r\n\r\n # Resize and pad the image\r\n old_size = im.size\r\n ratio = float(desired_size) / max(old_size)\r\n new_size = tuple([int(x*ratio) for x in old_size])\r\n im = im.resize(new_size)\r\n\r\n # Create a new image and paste the resized on it\r\n new_im = Image.new(\"RGB\", (desired_size, desired_size), \"white\")\r\n new_im.paste(im, ((desired_size-new_size[0])//2, (desired_size-new_size[1])//2))\r\n return new_im\r\n \r\n \r\n\r\n def predict_image(image, model, feature_extractor):\r\n # Ensure model is in eval mode\r\n model.eval()\r\n\r\n # Convert image to tensor\r\n transform = ToTensor()\r\n input_tensor = transform(image)\r\n input_tensor = torch.tensor(numpy.array(feature_extractor(input_tensor)['pixel_values']))\r\n\r\n # Move tensors to the right device\r\n input_tensor = input_tensor.to(device)\r\n\r\n # Forward pass of the image through the model\r\n output = model(input_tensor)\r\n\r\n # Convert model output to probabilities using softmax\r\n probabilities = torch.nn.functional.softmax(output, dim=1)\r\n\r\n return probabilities.cpu().detach().numpy()\r\n \r\n app.run(debug=True, host='0.0.0.0')","repo_name":"Urist-Mc-Urist/AI_detector","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21697109084","text":"import uploads\nimport auth\nimport os\n\nfrom sys import argv\n\nBIN = os.path.join(os.getcwd(), 'bin')\n\nif __name__ == '__main__':\n _, path = argv\n\n print(\"Authenticating...\")\n SCOPES = ['https://www.googleapis.com/auth/photoslibrary.appendonly']\n service, creds = auth.getService(SCOPES)\n print(\"Authenticated. Checking for cache[success.json]...\")\n\n if not os.path.isdir(BIN):\n os.makedirs(BIN)\n\n cache = uploads.getCache(BIN)\n\n success = uploads.upload(path, creds, service, ignore=cache, bindir=BIN)\n\n print('SUCCESS' if success==True else 'Unable to upload some files. 
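`download_and_preprocess_image` and `preprocess_image` above duplicate the same resize-and-pad logic; a single shared helper avoids drift between the two. One caveat worth noting: `Image.ANTIALIAS` was removed in Pillow 10, where `Image.LANCZOS` is the equivalent filter. A sketch of a shared helper:

```python
from PIL import Image

def resize_and_pad(im: Image.Image, desired_size: int = 384) -> Image.Image:
    # scale the longer side to desired_size, preserving aspect ratio
    ratio = desired_size / max(im.size)
    new_size = tuple(int(x * ratio) for x in im.size)
    im = im.resize(new_size, Image.LANCZOS)
    # paste centered onto a white square canvas
    canvas = Image.new("RGB", (desired_size, desired_size), "white")
    canvas.paste(im, ((desired_size - new_size[0]) // 2,
                      (desired_size - new_size[1]) // 2))
    return canvas
```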
Please retry')\n","repo_name":"shraiysh/Photos-Upload","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7448076868","text":"#!/usr/bin/env python3\n\"\"\"Sets based routines\"\"\"\n\nfrom itertools import chain, combinations\nfrom typing import Collection, Iterable, Optional\n\nimport logging\n\nlogger = logging.getLogger(\"SETS\")\n\n\ndef sets(args=None):\n logger.debug(\"sets\")\n s1 = {0, 1, 2, 3, 4, 0, 3, 2, 1}\n s2 = {11, 22, 33, 44, 33, 22, 11}\n merge_sets(s1, s2)\n\n\ndef merge_sets(s1, s2):\n logger.info(s1)\n s1.update(s2)\n logger.info(s1)\n logger.info(s2)\n\n\ndef from_practice(args):\n fruits = [\"apple\", \"banana\", \"orange\", \"mango\", \"pineapple\"]\n fruits_powerset = list(subsets(fruits))\n fruits_subsets_1to3 = list(subsets(fruits, min_size=1, max_size=3))\n\n print(f\"Power set of {fruits}: {fruits_powerset}\", end=\"\\n\\n\")\n print(f\"Subsets of {fruits} of size 1 to 3: {fruits_subsets_1to3}\")\n\n\n# https://python.plainenglish.io/a-python-recipe-for-generating-subsets-a4a4e191df3d\n# https://en.wikipedia.org/wiki/Power_set\n#\ndef subsets(collection: Collection, min_size: int = 0, max_size: Optional[int] = None) -> Iterable:\n \"\"\"Produce all the subsets of `collection` with cardinalities between\n `min_size` and `max_size` (inclusive).\"\"\"\n\n min_size = max(0, min_size)\n max_size = max_size if max_size is not None else len(collection)\n\n return chain(*{map(set, combinations(collection, r)) for r in range(min_size, max_size + 1)})\n","repo_name":"kettlewell/me","sub_path":"libs/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5343673648","text":"n=input()\nn1=list(n)\nl=['a','e','i','o','u']\nfor i in n1:\n if i in l:\n l.remove(i)\nif len(l)==0:\n print(\"0\")\nelse:\n print(*l)","repo_name":"Harish0587/codemind-python","sub_path":"vowels_not_in_a_string.py","file_name":"vowels_not_in_a_string.py","file_ext":"py","file_size_in_byte":141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34499679975","text":"from numpy import random\nimport numpy as np\nfrom numba import njit,jit,prange\n\n# class LIF():\n# def __init__(self, Vr, Vth, tau, t):\n# self.Vr = Vr\n# self.V0 = Vr\n# self.Vth = Vth\n# self.tau = tau\n# self.t = t\n# self.dt = t[1] - t[0]\n#\n# def set_FF_weights(self,w):\n# self.w = w\n#\n# def set_input(self, input):\n# self.input = input\n#\n# def simulate(self, mu0, sigma):\n# nsteps = len(self.t)\n# ntrials = self.input.shape[2]\n# dt = self.dt\n# tau = self.tau\n# Vr = self.Vr\n# Vth = self.Vth\n# spike_trains = np.zeros((nsteps,ntrials))\n# Vtot = np.empty((nsteps,ntrials))\n# for i_trial in range(ntrials):\n# V = np.empty(nsteps)\n# V[0] = self.V0\n# for i in range(nsteps-1):\n# dV = dt/tau * (Vr-V[i]+ mu0 + sigma * random.normal(0,1)/np.sqrt(dt)) + np.dot(self.w, self.input[:,i,i_trial])\n# V[i+1] = V[i] + dV\n# if V[i+1] > Vth:\n# spike_trains[i+1,i_trial] = 1\n# V[i] = 20\n# V[i+1] = Vr\n# Vtot[:,i_trial] = V\n# self.Vtot = Vtot\n# self.spike_trains = spike_trains\n# return self.spike_trains, self.Vtot\n\n@njit(parallel = True)\ndef simulate_lif(Vr, Vth, tau, t, w, w_mf, input, input_mf, mu0, sigma):\n V0 = Vr\n dt = t[1] - t[0]\n nsteps = len(t)\n ntrials = input.shape[2]\n spike_trains = 
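A quick sanity check for the `subsets()` helper in the sets record above: a collection of n elements has 2**n subsets, and the size bounds select binomial slices of the power set. (This sketch uses `chain.from_iterable` and yields tuples rather than sets, a minor variant of the original.)

```python
from itertools import chain, combinations

def subsets(collection, min_size=0, max_size=None):
    max_size = len(collection) if max_size is None else max_size
    return chain.from_iterable(
        combinations(collection, r) for r in range(max(0, min_size), max_size + 1))

assert len(list(subsets(range(5)))) == 2 ** 5             # full power set: 32
assert len(list(subsets(range(5), 1, 3))) == 5 + 10 + 10  # C(5,1)+C(5,2)+C(5,3)
```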
np.zeros((nsteps, ntrials))\n Vtot = np.empty((nsteps, ntrials))\n w = np.ascontiguousarray(w)\n w_mf = np.ascontiguousarray(w_mf)\n for i_trial in prange(ntrials):\n V = np.empty(nsteps)\n V[0] = V0\n for i in range(nsteps - 1):\n dV = dt / tau * ( Vr - V[i] + mu0 +\n sigma * random.normal(0, 1) / np.sqrt(dt) +\n tau/dt * w @ np.ascontiguousarray(input[:,i,i_trial]) )\n V[i + 1] = V[i] + dV\n if V[i + 1] > Vth:\n spike_trains[i + 1, i_trial] = 1\n V[i] = 20\n V[i + 1] = Vr\n Vtot[:, i_trial] = V\n return spike_trains, Vtot\n\n\n\n\n\n\n##\n\n","repo_name":"gbondanelli/BiophysicalReadout","sub_path":"modules_/lif.py","file_name":"lif.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11492941016","text":"#grid = arcpy.GridIndexFeatures_cartography(\"in_memory\\grid\", \"Predios_Nacionales\", \"INTERSECTFEATURE\", polygon_width = \"20 kilometers\", polygon_height = \"20 kilometers\")\r\n#arcpy.DeleteRows_management(\"Predios_test\")\r\nimport time, arcpy, multiprocessing, Queue\r\n\r\ndef intersectar(poly):\r\n t1 = time.clock()\r\n arcpy.SelectLayerByLocation_management(predios, \"HAVE_THEIR_CENTER_IN\", poly)\r\n p = r\"in_memory/predioTemp\"\r\n arcpy.Intersect_analysis(\"%s #;%s #\" % (predios.name, cancha.name), out_feature_class = p)\r\n arcpy.Append_management(p, PrediosTotal, \"NO_TEST\")\r\n arcpy.AddMessage(str(time.clock()-t1))\r\n\r\n\r\nTIMEOUT = 10\r\nPrediosTotal = \"Predios_test\"\r\nt0 = time.clock()\r\n\r\npredios = arcpy.GetParameter(0)\r\ncancha = arcpy.GetParameter(1)\r\ngrilla = arcpy.GetParameter(2)\r\n\r\npolys = arcpy.CopyFeatures_management(grilla, arcpy.Geometry())\r\nfor poly in polys:\r\n intersectar(poly)\r\n\r\narcpy.AddMessage(time.clock()-t0)\r\narcpy.SetParameter(3, PrediosTotal)","repo_name":"javierayax/bathGeoprocessing","sub_path":"IntersectXBloques.py","file_name":"IntersectXBloques.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24148559980","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nM = np.loadtxt(\"DATA\\\\data10.txt\", skiprows=3)\nx1 = M[:, 0]\ny1 = M[:, 1]\nx2 = M[:, 2]\ny2 = M[:, 3]\nx3 = M[:, 4]\ny3 = M[:, 5]\n\np1 = np.polyfit(x1, y1, 1)\np2 = np.polyfit(x2, y2, 1)\np3 = np.polyfit(x3, y3, 1)\n\nxx1 = np.linspace(min(x1), max(x1), 50)\nxx2 = np.linspace(min(x2), max(x2), 50)\nxx3 = np.linspace(min(x3), max(x3), 50)\n\nz1 = np.polyval(p1, xx1)\nz2 = np.polyval(p2, xx2)\nz3 = np.polyval(p3, xx3)\n\n\nfig = plt.figure()\nplt.plot(x1, y1, \"o\", xx1, z1, \"-\")\nplt.legend([\"Data\", \"Line of Best Fit\"], loc=\"best\")\nplt.title(\"Least Square Fit of a Straight Line for Data Set A\")\nplt.xlabel(\"$X_1$\")\nplt.ylabel(\"$Y_1$\")\n\nfig.savefig(\"figure1.png\")\nfig = plt.figure()\nplt.plot(x2, y2, \"o\", xx2, z2, \"-\")\nplt.legend([\"Data\", \"Line of Best Fit\"], loc=\"best\")\nplt.title(\"Least Square Fit of a Straight Line for Data Set B\")\nplt.xlabel(\"$X_2$\")\nplt.ylabel(\"$Y_2$\")\n\nfig.savefig(\"figure2.png\")\nfig = plt.figure()\nplt.plot(x3, y3, \"o\", xx3, z3, \"-\")\nplt.legend([\"Data\", \"Line of Best Fit\"], loc=\"best\")\nplt.title(\"Least Square Fit of a Straight Line for Data Set C\")\nplt.xlabel(\"$X_3$\")\nplt.ylabel(\"$Y_3$\")\nfig.savefig(\"figure3.png\")\n\nplt.show()\n\n","repo_name":"kconfeiteiro/coursework","sub_path":"Fall 2023/MA 305/(001) 
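In `simulate_lif` above, dividing the unit Gaussian draw by `np.sqrt(dt)` inside the `dt/tau` prefactor yields the standard Euler-Maruyama noise scaling for the leaky integrate-and-fire SDE; expanding the update (feed-forward input terms omitted) gives:

```latex
V_{i+1} = V_i + \frac{\Delta t}{\tau}\bigl(V_r - V_i + \mu_0\bigr)
        + \frac{\sigma\sqrt{\Delta t}}{\tau}\,\xi_i,
\qquad \xi_i \sim \mathcal{N}(0, 1)
```

so the stochastic term shrinks as the square root of the step size, as required for the discretization to converge to the underlying diffusion.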
Classwork/CW10/References/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5274269042","text":"import sys\nimport time\nimport requests\nfrom pymongo import MongoClient\nimport os\nimport pymongo\nimport ast\nimport re\nfrom decouple import config\nfrom datetime import datetime\nfrom telegram import ParseMode\nimport pytz\nserver_date = datetime.now()\ntimezone = pytz.timezone(\"America/Bogota\")\nperu_date = server_date.astimezone(timezone)\ndate = peru_date.strftime(\"%d/%m/%Y\" )\n\n\n\ndef send_telegram(message):\n requests.post(config(\"TELEGRAM_KEY\"),\n \n data= {'chat_id': '-1001811194463','text': str(message) , 'parse_mode':ParseMode.HTML} ) # DISC0VERY\n \n\nclient = MongoClient(config(\"MONGO_DB\"))\ndb5 = client[\"scrap\"]\ncollection5 = db5[\"scrap\"] \n \n\ndef busqueda(codigo):\n \n\n t5 = collection5.find({\"sku\":str(codigo)})\n print( \"se realizo busqueda\")\n print(codigo)\n for i in t5:\n print(i)\n print(\"se envio a telegram\") \n send_telegram (\"Marca: \"+i[\"brand\"]+\"\\nModelo: \"+i[\"product\"]+\"\\nPrecio Lista :\"+str(i[\"list_price\"])+\"\\nPrecio web :\"+str(i[\"best_price\"])+\"\\nPrecio Tarjeta :\"+str(i[\"card_price\"])+\"\\n\"+i[\"image\"]+\"\\n\\nLink :\"+i[\"link\"])\n\n\n\n\ndef search_brand_dsct(brand,dsct):\n \n if dsct <41:\n dsct = 40\n t5 = collection5.find({\"brand\":{\"$in\":[ re.compile(str(brand), re.IGNORECASE)]}, \"web_dsct\":{\"$gte\":int(dsct)}, \"date\": date}).sort([{\"web_dsct\", pymongo.DESCENDING}])\n \n print( \"se realizo busqueda\")\n\n count = 0\n for i in t5:\n count = count+1\n if count == 100:\n break\n print(i)\n print(\"se envio a telegram\") \n send_telegram (\"Marca: \"+i[\"brand\"]+\"\\nModelo: \"+i[\"product\"]+\"\\nPrecio Lista :\"+str(i[\"list_price\"])+\"\\nPrecio web :\"+str(i[\"best_price\"])+\"\\nPrecio Tarjeta :\"+str(i[\"card_price\"])+\"\\n\"+i[\"image\"]+\"\\n\\nLink :\"+i[\"link\"])\n time.sleep(1)\n\n\n\n\n\n\n\n","repo_name":"jechs83/my_bot_py","sub_path":"buscador/draft/telegram_busca.py","file_name":"telegram_busca.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34685588482","text":"#!/usr/bin/env python\nfrom abc import ABC\n\nimport ast\nimport click\n\nimport multiprocessing\nimport uvicorn as unicorn # lol\nimport celery\n\nfrom gunicorn.app.base import BaseApplication\nfrom typing import Any, Callable, Dict, TYPE_CHECKING, Union\n\nif TYPE_CHECKING:\n from asgiref.typing import ASGIApplication\n\n\ndef number_of_cpus():\n return multiprocessing.cpu_count()\n\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\n\nclass StandaloneApplication(BaseApplication, ABC):\n def __init__(self, application: Union[\"ASGIApplication\", Callable, str],\n options: Dict[str, Any] = None):\n self.options = options or {}\n self.application = application\n super().__init__()\n\n def load_config(self):\n config = {\n key: value\n for key, value in self.options.items()\n if key in self.cfg.settings and value is not None\n }\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return self.application\n\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command()\n@click.option('--module', default='app.asgi:app', help='Python module for uvicorn to run.', show_default=True)\n@click.option('--host', default='0.0.0.0', 
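One bug worth flagging in `search_brand_dsct` above: `.sort([{"web_dsct", pymongo.DESCENDING}])` builds a list containing a *set* (curly braces), which PyMongo rejects; `sort()` expects `(key, direction)` tuples. A corrected sketch of the same query, with `brand`, `dsct`, `date`, and `collection5` as defined in that script:

```python
import re
import pymongo

cursor = collection5.find(
    {"brand": {"$in": [re.compile(str(brand), re.IGNORECASE)]},
     "web_dsct": {"$gte": int(dsct)},
     "date": date}
).sort([("web_dsct", pymongo.DESCENDING)])
```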
help='Address to listen on.', show_default=True)\n@click.option('--port', default=8080, help='Port to listen on.', show_default=True)\n@click.option('--workers', default=1, help='Number of workers to use.', show_default=True)\n@click.option('--log-level', default='info', help='Log level to use.', show_default=True)\ndef uvicorn(module: str, host: str, port: int, workers: int, log_level: str):\n unicorn.run(module, host=host, port=port, log_level=log_level.lower(), workers=workers)\n\n\n@cli.command()\n@click.option('--module', default='app.asgi:app', help='Python module for uvicorn to run.', show_default=True)\n@click.option('--host', default='0.0.0.0', help='Address to listen on.', show_default=True)\n@click.option('--port', default=8080, help='Port to listen on.', show_default=True)\n@click.option('--workers', default=number_of_workers(), help='Number of workers to use.', show_default=True)\n@click.option('--log-level', default='info', help='Log level to use.', show_default=True)\ndef gunicorn(module: str, host: str, port: int, workers: int, log_level: str):\n options = {\n \"bind\": \"%s:%s\" % (host, port),\n \"workers\": workers,\n \"worker_class\": \"uvicorn.workers.UvicornWorker\",\n \"loglevel\": log_level\n }\n StandaloneApplication(module, options).run()\n\n\n@cli.group(name=\"celery\")\ndef celery_group():\n pass\n\n\n@celery_group.command(name=\"worker\")\n@click.option('--app', '-A', default='app.tasks', help='Application', show_default=True)\n@click.option('--broker', '-b', default='redis://localhost:6379/0', help='', show_default=True)\n@click.option('--result-backend', default=None, help='', show_default=True)\n@click.option('--task-events', '-E', is_flag=True, help='Enable sending task events.', show_default=True)\n@click.option('--hostname', '-n', default=\"celery@%h\", help='Set custom hostname.', show_default=True)\n@click.option('--concurrency', '-c', default=number_of_cpus(), help='Number of concurrent processes/threads.',\n show_default=True)\n@click.option('--log-level', default='info', help='Logging level.', show_default=True)\ndef worker_command(app: str, broker: str, result_backend: str, task_events: bool, hostname: str, concurrency: int,\n log_level: str):\n a = celery.Celery()\n\n celery_args = ['--app', app, '--broker', broker]\n if result_backend is not None:\n celery_args.extend(['--result-backend', result_backend])\n\n worker_args = ['worker',\n '--hostname', hostname,\n '--concurrency', str(concurrency),\n f'--loglevel={log_level.lower()}']\n if task_events:\n worker_args.append('--task-events')\n\n args = celery_args + worker_args\n print(args)\n\n a.start(argv=args)\n\n\n@celery_group.command(name=\"beat\")\n@click.option('--app', '-A', default='app.tasks', help='Application', show_default=True)\n@click.option('--log-level', default='info', help='Logging level.', show_default=True)\ndef beat_command(app: str, log_level: str):\n a = celery.Celery()\n celery_args = ['--app', app]\n beat_args = ['beat', f'--loglevel={log_level.lower()}']\n args = celery_args + beat_args\n a.start(argv=args)\n\n\n@celery_group.command(name=\"flower\")\n@click.option('--app', '-A', default='app.tasks', help='Application', show_default=True)\n@click.option('--broker', '-b', default='redis://localhost:6379/0', help='', show_default=True)\n@click.option('--result-backend', default=None, help='', show_default=True)\n@click.option('--address', '-a', default='0.0.0.0', help='Address to listen on.', show_default=True)\n@click.option('--port', '-p', default=5555, help='Port to listen 
on.', show_default=True)\ndef flower_command(app: str, broker: str, result_backend: str, address: str, port: int):\n a = celery.Celery()\n\n celery_args = ['--app', app, '--broker', broker]\n if result_backend is not None:\n celery_args.extend(['--result-backend', result_backend])\n\n flower_args = ['flower', f'--address={address}', f'--port={port}']\n args = celery_args + flower_args\n a.start(argv=args)\n\n\nclass PythonLiteralOption(click.Option):\n def type_cast_value(self, ctx, value):\n try:\n return ast.literal_eval(value)\n except:\n raise click.BadParameter(value)\n\n\n@celery_group.command()\n@click.option('--args', cls=PythonLiteralOption, default='[\"--version\"]', help=\"Run a custom command.\", show_default=True)\ndef command(args):\n a = celery.Celery()\n a.start(argv=args)\n\n\nif __name__ == '__main__':\n cli()\n","repo_name":"nmcbride/asgi_falcon","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":5794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26345959019","text":"# Del A\r\ndef at_least_two(word,c):\r\n # tom telle variabel\r\n count = 0\r\n # gjør ordet om til en liste\r\n listed_word = list(word)\r\n # løper gjennom bokstav for bokstav\r\n for letter in word:\r\n # sjekker om noen av de er like\r\n if c == letter:\r\n count += 1\r\n # sjekker om telle variabelen er større eller lik 2\r\n if count >= 2:\r\n return True\r\n else: return False\r\n\r\n# del B\r\ndef at_least_two_in_list(wordlist,c):\r\n # tom liste variabel\r\n list_word_letter = []\r\n # løper gjennom ord for ord\r\n for word in wordlist:\r\n # gjør ordet om til liste\r\n listed_word = list(word)\r\n # og bruker samme fremgangsmåte som i del A\r\n count = 0\r\n for letter in listed_word:\r\n if c == letter:\r\n count += 1\r\n if count >= 2:\r\n list_word_letter.append(word)\r\n # returnerer lengden av listen\r\n return len(list_word_letter)\r\n\r\n# del C\r\n# funksjon som leser filen\r\ndef read_file(path):\r\n with open(path,\"rt\",encoding=\"utf-8\") as f:\r\n return f.read()\r\n# hovedfunksjonen som sjekker om det er to av noe i filen\r\ndef at_least_two_in_file(path,c):\r\n # tom telle variabler for antall ord\r\n count_word = 0\r\n # leser gjennom filen\r\n file_content = read_file(path)\r\n # finner ord i teksten\r\n for word in file_content.splitlines():\r\n print(word)\r\n count_letter = 0\r\n # går gjennom bokstav og sammenlikner\r\n for letter in word:\r\n if c == letter:\r\n count_letter += 1\r\n # sjekker antall bokstaver\r\n if count_letter >= 2:\r\n count_word += 1\r\n # returnerer antall ord med riktig forekomst av bokstaven\r\n return count_word\r\nprint(at_least_two_in_file(\"lab10/wordlist.txt\", \"and\"))","repo_name":"amandaskaugerud/INF100","sub_path":"lab10/uke_10_oppg_1.py","file_name":"uke_10_oppg_1.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40477325350","text":"from datetime import datetime\nimport logging\n\nimport bson\nfrom pymongo import ReturnDocument\n\nimport barin as b\nimport barin.schema as s\n\nlog = logging.getLogger(__name__)\nmetadata = b.Metadata()\n\n# Message statuses: pending, ready, busy, error\n\n_schedule = b.subdocument(\n metadata, '_schedule',\n b.Field('priority', int, default=10),\n b.Field('after', datetime, default=datetime.fromtimestamp(0)),\n b.Field('status', str, default='pending'),\n b.Field('message', str, default=''),\n b.Field('worker', 
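The `PythonLiteralOption` above overrides click's `type_cast_value` so an entire Python literal can be passed as one shell argument; `ast.literal_eval` parses it safely, and an invalid literal becomes a clean `BadParameter` error. Usage sketch (command names follow the defaults shown above):

```python
import ast

ast.literal_eval('["worker", "--loglevel=info"]')  # -> ['worker', '--loglevel=info']
# shell usage:
#   python start.py celery command --args '["worker", "--loglevel=info"]'
```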
str, default=None))\n\n\n@b.cmap(b.collection(\n metadata, 'c2.message',\n b.Field('_id', s.ObjectId, default=bson.ObjectId),\n b.Field('s', metadata.cref('_schedule')),\n b.Field('task', str),\n b.Field('payload', {str: None})))\nclass Message(object):\n\n def __repr__(self):\n return ''.format(self._id, self.task)\n\n @classmethod\n def reserve(cls, worker):\n now = datetime.utcnow()\n q = cls.m.query\n q = q.match(cls.s.status == 'ready')\n q = q.match(cls.s.after <= now)\n q = q.sort(-cls.s.priority)\n q = q.sort(-cls.s.priority)\n return q.find_one_and_update(\n cls.s.status.set('busy')\n & cls.s.worker.set(worker),\n return_document=ReturnDocument.AFTER)\n\n def error(self, message):\n cls = self.__class__\n self.m.update(\n cls.s.status.set('error') &\n cls.s.message.set(message))","repo_name":"Arborian/chapman2","sub_path":"chapman2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18570998310","text":"__author__ = \"hsk81\"\n__date__ = \"$Mar 10, 2012 12:30:40 AM$\"\n\n################################################################################\n################################################################################\n\nfrom django.db import transaction\nfrom django.http import HttpResponse\n\nfrom editor.models import NODE\nfrom editor.models import LEAF\n\nimport base64\nimport json\n\n################################################################################\n################################################################################\n\n@transaction.commit_manually\ndef rename (request):\n\n (type, ids) = json.loads (base64.b32decode (request.POST['nodeId']))\n if type == 'node':\n node = NODE.objects.get (pk = ids[0])\n node.name = request.POST['name']\n node.save ()\n\n response = success (\n nodeId = request.POST['nodeId'],\n name = request.POST['name'])\n\n elif type == 'leaf':\n leaf = LEAF.objects.get (pk = ids[1])\n leaf.name = request.POST['name']\n leaf.save ()\n\n response = success (\n nodeId = request.POST['nodeId'],\n name = request.POST['name'])\n\n else:\n response = failure (nodeId = request.POST['nodeId'], name = None)\n\n return response\n\n################################################################################\n\ndef success (nodeId, name):\n transaction.commit ()\n return http_response (True, nodeId, name)\n\ndef failure (nodeId, name):\n transaction.rollback ()\n return http_response (False, nodeId, name)\n\ndef http_response (success, nodeId, name):\n\n js_string = json.dumps ({\n 'success' : success,\n 'id' : nodeId,\n 'name' : name\n })\n\n return HttpResponse (js_string, mimetype='application/json')\n\n################################################################################\n################################################################################\n","repo_name":"hsk81/notex-v1.0","sub_path":"editor/views/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"de","doc_type":"code","stars":30,"dataset":"github-code","pt":"81"} +{"seq_id":"15346331045","text":"import pandas as pd\nimport transformers\nfrom tqdm import tqdm\n\n\ndef calc_length(\n df: pd.DataFrame,\n text_col: str,\n tokenizer: transformers.models.deberta.tokenization_deberta_fast.DebertaTokenizerFast,\n) -> list:\n lengths = []\n tk0 = tqdm(df[text_col].fillna(\"\").values, total=len(df))\n for text in tk0:\n length = len(tokenizer(text, 
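`Message.reserve` above leans on `find_one_and_update`, which filters, sorts, updates, and returns the document in one server-side atomic step, so two concurrent workers can never claim the same message. (Incidentally, `reserve` sorts on `-priority` twice in a row, which looks like a copy-paste slip; one sort suffices.) A raw-PyMongo sketch of the same pattern, where the `messages` collection handle and `worker` string are assumed placeholders and field names follow the `Message` model:

```python
from datetime import datetime
from pymongo import ReturnDocument

msg = messages.find_one_and_update(
    {"s.status": "ready", "s.after": {"$lte": datetime.utcnow()}},
    {"$set": {"s.status": "busy", "s.worker": worker}},
    sort=[("s.priority", -1)],             # highest priority first
    return_document=ReturnDocument.AFTER,  # return the updated document
)
```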
add_special_tokens=False)[\"input_ids\"])\n lengths.append(length)\n return lengths\n","repo_name":"sinchir0/kaggle_nbme","sub_path":"nbme_pipeline/ex001/calc_text_len.py","file_name":"calc_text_len.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"74336703306","text":"import os\nimport numpy as np\nfrom copy import copy\nfrom math import floor, log10\nfrom time import time\nfrom .utils import pca\nimport pickle\n\nfrom .dqm_pure_python import choose_basis_by_distance_python, build_overlaps_python,\\\n make_operators_python, build_frames_python\n\nfrom . import dqm_lib # compiled-function library\n\ntry:\n from matplotlib import pyplot as plt\n HAVE_PLT = True\nexcept ModuleNotFoundError:\n HAVE_PLT = False\n\n\nclass DQM:\n '''\n This is the main class for using DQM (Dynamic Quantum Mapping).\n\n The code has a reasonable number of error checks, but it's a complicated system. The onus is currently on\n the user to make sure that the choice of parameter settings makes sense and that a given instance of the\n class doesn't wind up in an inconsistent state.\n\n Class variables:\n\n :cvar min_report_time: Report class-level execution times that are this many seconds or longer. Default 10.\n\n Instance variables (general):\n\n :ivar verbose: Boolean: whether to report on various operations. Default True.\n :ivar min_report_time: Report instance-level execution times that are this many seconds or longer. Default 10.\n :ivar raw_data: The raw data (a 2-D matrix). Default None.\n :ivar call_c: Boolean: whether to call compiled (C++) code. Default True if the module can find the compiled\n library, otherwise default False (with a printed warning).\n\n Instance variables (for PCA):\n\n :ivar pca_transform: Boolean: whether to do PCA rotation/truncation of the raw data when creating frame 0.\n Default True. (If False, all other PCA settings are ignored.)\n :ivar pca_num_dims: Integer number of PCA dimensions to use. Takes precedence over pca_var_threshold if set.\n Default None.\n :ivar pca_var_threshold: Threshold used for choosing number of PCA dimensions to use, representing required\n proportion of total cumulative variance (e.g., 0.9) in the PCA dimensions used. Ignored if pca_num_dims\n is set. Default None.\n :ivar raw_col_means: Stored column means of raw data. (Needed for PCA rotation/truncation of any new\n 'out-of-sample' points.) Default None.\n :ivar pca_eigvals: Stored vector of PCA eigenvalues (in descending order). Default None.\n :ivar pca_eigvecs: Stored matrix of PCA eigenvectors (columns correspond to entries in pca_eigvals).\n Default None.\n :ivar pca_cum_var: Stored vector of proportional cumulative variance for the first n PCA dimensions.\n Default None.\n\n Instance variables (for the basis):\n\n :ivar basis_size: Integer number of points to use in the basis. (All rows will be used by default for the basis\n if this value is not set.) Default None.\n :ivar basis_num_chunks: Choose the basis by partitioning all rows into this number of 'chunks'. (Multiple chunks\n offers a useful speedup when working with large numbers of rows.) Default 1.\n :ivar basis_rand_seed: Random seed, used to choose a random starting row for the basis. Ignored if\n basis_start_with_outlier is True. Default 1.\n :ivar basis_start_with_outlier: Boolean: whether to use the single greatest outlier row as the starting\n row for the basis. (Will be slow, O(n^2), for large numbers of points.) 
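`calc_length` above tokenizes one row at a time; Hugging Face fast tokenizers also accept a list of strings and tokenize in batch, which is typically much faster. An equivalent sketch using the same `df`/`text_col`/`tokenizer` arguments:

```python
texts = df[text_col].fillna("").tolist()
encoded = tokenizer(texts, add_special_tokens=False)["input_ids"]
lengths = [len(ids) for ids in encoded]
```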
Default True.\n :ivar basis_row_nums: Stored list of row numbers for rows in the basis. Default None.\n :ivar non_basis_row_nums: Stored list of row numbers for rows not in the basis. Default None.\n :ivar basis_rows: Stored matrix of basis rows (i.e., data for the basis rows, taken from frame 0).\n Default None.\n\n Instance variables (for choosing sigma -- see method 'choose_sigma_for_basis'):\n\n :ivar overlap_min_threshold: Minimum overlap for non-basis rows. Default 0.5.\n :ivar overlap_mean_threshold: Minimum mean overlap for non-basis rows. Default 0.9.\n :ivar mean_row_distance: Stored estimated mean pairwise distance between rows in the data set. (See method\n 'estimate_mean_row_distance'). Default None.\n\n Instance variables (main DQM parameters):\n\n :ivar sigma: Width (standard deviation) of the multidimensional Gaussian placed around every data point.\n Default None.\n :ivar mass: Value of mass assigned to each data point during DQM evolution. Typically set manually by the\n user less often than sigma (see method 'default_mass_for_num_dims'). Default None.\n :ivar step: Time step used during DQM evolution. Typically set manually by the user rarely or never.\n Default 0.1.\n\n Instance variables (DQM operators):\n\n :ivar simt: Stored transpose of the 'similarity' matrix, used to convert state vectors from the 'raw'\n basis to the orthonormal basis of eigenstates. Default None.\n :ivar xops: Stored 3-D array of position-expectation operator matrices. (Each slice in 3rd dim is the\n operator matrix for the corresponding column/dimension in 2nd dim in 'frames'.)\n :ivar exph: Stored 'evolution' operator matrix. (This is the exponentiated time-evolution Hamiltonian\n operator matrix.) Has complex values. Default None.\n\n Instance variables (frames):\n\n :ivar stopping_threshold: A given data point is considered to have 'stopped' when it\n moves less than this distance from one frame to the next frame. Typically is automatically set to\n mean_row_distance / 1e6, but can be set manually. Default None.\n :ivar frames: Stored 3-D array of frames: . First slice in 3rd dim\n contains the original data (possibly PCA rotated/truncated), stored here before evolution has taken\n place. Default None.\n '''\n\n '''\n Note on Passing Arrays to C Code\n \n Numpy 'C-CONTIGUOUS' arrays are not actually row-major when they're 3-D -- it's the 3rd dimension that varies\n most quickly, not the 2nd dimension, in the underlying memory. So, in order to give the C code the ordering\n that it expects when passing a 3-D array, we need to put the column dimension in the 3rd dimension, like so:\n . This way, the 2nd dimension (column) is varying most quickly, which is\n what the C code expects (this is what 'row-major order' means). So now, when allocated memory is treated\n as 1-dimensional by the C code, sequential writes fill up frame 0 first, row by row, then frame 1, etc.,\n as desired. Afterward, we permute the dimensions back again here in the Python code, to make the 3-D\n array again the expected .\n '''\n\n\n ## static class variables\n # note: there is also a min_report_time instance variable, but there are some class-level operations to report on\n min_report_time = 10 # report execution times that are 10 seconds or longer\n\n\n def __init__(self):\n '''\n Constructor for the DQM class\n\n Initialize all member variables -- some have defaults, many default to None. 
Documentation of instance\n variables is in the class docstring (above).\n '''\n\n self.verbose = True\n self.min_report_time = 10\n\n self.raw_data = None\n\n if dqm_lib is None:\n print(\"## WARNING: in DQM constructor -- compiled-library code not found, setting 'call_c' to false\")\n self.call_c = False\n else:\n self.call_c = True\n # end if/else we have compiled-library code or not\n\n ## for PCA\n # note: if pca_transform is false, all other PCA settings are ignored\n self.pca_transform = True\n self.pca_num_dims = None\n self.pca_var_threshold = None\n self.raw_col_means = None\n self.pca_eigvals = None\n self.pca_eigvecs = None\n self.pca_cum_var = None\n\n ## for choosing and storing basis\n self.basis_size = None\n self.basis_num_chunks = 1\n self.basis_rand_seed = 1\n self.basis_start_with_outlier = True\n self.basis_row_nums = None\n self.non_basis_row_nums = None\n self.basis_rows = None\n\n ## for choosing sigma (see choose_sigma_for_basis)\n self.overlap_min_threshold = 0.5\n self.overlap_mean_threshold = 0.9\n\n ## main dqm parameters\n self.sigma = None\n self.mass = None\n self.step = 0.1\n\n self.mean_row_distance = None\n\n ## dqm operators\n self.simt = None\n self.xops = None\n self.exph = None\n\n ## frames\n self.stopping_threshold = None\n self.frames = None\n # end __init__ constructor\n\n\n def default_mass_for_num_dims(self, num_dims=None):\n '''\n Use a simple heuristic formula (derived from random-data experiments) to return a suggested\n default value of mass for a given number of dimensions:\n\n mass = -1 + 2 * log10(num_dims)\n\n We set a minimum default mass of 1, which overrides the heuristic for small numbers of\n dimensions (< 10), to avoid oscillation caused by a 'too transparent' landscape.\n\n Important note: for any given data set, the effective dimensionality of the data cloud might be\n significantly lower than the number of dimensions being used, which could affect the appropriateness\n of the suggested value of mass. We make no attempt to deal with that issue here.\n\n :param num_dims: Number of dimensions. if None, we attempt to infer the number of dimensions\n from self.frames (by size of 2nd dim). 
Default None.\n :return: Suggested default mass value for the given number of dimensions.\n '''\n\n '''\n 2FIX: consider ways to address the issue mentioned above, where we would suggest a default mass\n based on effective dimensionality of the data cloud, not just on the total number of dimensions\n being used.\n '''\n\n assert num_dims is not None or self.frames is not None,\\\n 'must have a number of dimensions (passed in or from self.frames) to determine a suggested default mass'\n if num_dims is None:\n num_dims = self.frames.shape[1]\n\n mass = -1 + 2 * log10(num_dims)\n # for small numbers of dimensions (< 10), make sure mass is positive and big enough to avoid oscillation\n mass = max(mass, 1)\n\n return mass\n # end method default_mass_for_num_dims\n\n\n def run_pca(self):\n '''\n Run PCA on self.raw_data (which must exist) and store results.\n\n :return: None\n '''\n\n t0 = time()\n\n assert type(self.raw_data) is np.ndarray and self.raw_data.ndim == 2, \\\n \"raw data must be 2-D ndarray in order to run PCA\"\n\n # store raw-data column means (important for out-of-sample operations on new points)\n self.raw_col_means = np.mean(self.raw_data, axis=0)\n\n # run PCA\n self.pca_eigvals, self.pca_eigvecs = pca(self.raw_data, self.verbose)\n assert min(self.pca_eigvals) >= 0, 'PCA eigenvalues must all be non-negative'\n\n # calculate cumulative variance of PCA dimensions (variance of each dimension is proportional to\n # the eigenvalue for that dimension)\n self.pca_cum_var = np.cumsum(self.pca_eigvals)\n self.pca_cum_var /= self.pca_cum_var[-1]\n\n t1 = time()\n\n if self.verbose:\n if t1 - t0 >= self.min_report_time:\n print(\"ran PCA in {} seconds\".format(round(t1 - t0)))\n if HAVE_PLT:\n self.plot_pca()\n else:\n print('# WARNING: need the matplotlib.pyplot package to do plots')\n # end method run_pca\n\n\n def clear_pca(self):\n '''\n Clear all PCA results (self.raw_col_means, self.pca_eigvals, self.pca_eigvecs and self.pca_cum_var).\n\n :return: None\n '''\n\n self.raw_col_means = None\n self.pca_eigvals = None\n self.pca_eigvecs = None\n self.pca_cum_var = None\n # end method clear_pca\n\n\n def plot_pca(self, num_dims=None):\n '''\n Display 3 PCA plots:\n\n * normalized eigenvalues (all divided by first eigenvalue)\n * log10 of normalized eigenvalues\n * proportional cumulative variance of data (all divided by total variance of data)\n\n Note: an assertion will fail if the Matplotlib PyPlot module is not loaded.\n\n :param num_dims: Number of PCA dimensions to show in the plots. 
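Two worked values for the heuristic above: log10(100) = 2 gives mass = -1 + 2*2 = 3, while 3 dimensions would give roughly -0.05 and is clamped to the floor of 1. As a standalone function:

```python
from math import log10

def default_mass(num_dims: int) -> float:
    return max(-1 + 2 * log10(num_dims), 1)

default_mass(100)  # -> 3.0
default_mass(3)    # heuristic gives ~ -0.046, clamped to 1
```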
Default None (meaning show all PCA dims).\n :return: None\n '''\n\n assert HAVE_PLT, \"must have loaded matplotlib.pyplot as plt to use plot_pca\"\n assert type(self.pca_eigvals) is np.ndarray, 'must have PCA eigenvalues to do PCA plots'\n\n if num_dims is None or num_dims > self.pca_eigvals.size:\n num_dims = self.pca_eigvals.size\n\n plt.figure(figsize=(20, 5))\n ax1 = plt.subplot(1, 3, 1)\n ax2 = plt.subplot(1, 3, 2)\n ax3 = plt.subplot(1, 3, 3)\n\n plt.axes(ax1)\n # normalized eigenvalues\n norm_eigvals = self.pca_eigvals[:num_dims] / (self.pca_eigvals[0])\n plt.plot(norm_eigvals, '-bo')\n plt.xlabel('dimension number (zero-based)')\n plt.ylabel('normalized eigenvalue')\n plt.title('PCA: Normalized Eigenvalues')\n\n plt.axes(ax2)\n max_idx = np.where(norm_eigvals > 0)[0][-1]\n log_norm_eigvals = np.log10(norm_eigvals[:max_idx])\n plt.plot(log_norm_eigvals, '-bo')\n plt.xlabel('dimension number (zero-based)')\n plt.ylabel('log10 of normalized eigenvalue')\n plt.title('PCA: Log10 of Normalized Eigenvalues')\n\n plt.axes(ax3)\n plt.plot(self.pca_cum_var[:num_dims], '-bo')\n plt.xlabel('dimension number (zero-based)')\n plt.ylabel('proportional cumulative variance')\n plt.title('PCA: Cumulative Variance')\n\n plt.show()\n # end method plot_pca\n\n\n def _choose_num_pca_dims(self):\n '''\n Return a number of PCA dimensions to use when creating the rotated/truncated frame-0 matrix.\n\n Logic:\n * If self.pca_num_dims and self.pca_var_threshold are both None, use all PCA dimensions.\n * Otherwise use self.pca_num_dims if set to a positive value.\n * Otherwise use self.pca_var_threshold.\n\n :return: Number of PCA dimensions to use\n '''\n\n assert self.pca_eigvals is not None, \"'self.pca_eigvals' must not be None (must have run PCA already)\"\n\n if self.pca_num_dims is None and self.pca_var_threshold is None:\n return self.pca_eigvals.size\n # end if using all PCA dimensions\n\n if self.pca_num_dims is not None:\n assert self.pca_num_dims > 0 and round(self.pca_num_dims) == self.pca_num_dims,\\\n f\"'self.pca_num_dims' must be a positive integer (currently set to {self.pca_num_dims})\"\n return self.pca_num_dims\n # end if using pca_num_dims\n\n ## otherwise, use pca_var_threshold\n\n assert 0 < self.pca_var_threshold <= 1,\\\n f\"'self.pca_var_threshold' must be in (0, 1] (currently set to {self.pca_var_threshold})\"\n\n if self.pca_var_threshold == 1:\n # make this case explicit to avoid machine-precision corner cases\n return self.pca_eigvals.size\n else:\n # find minimum number of dimensions that satisfies explained-variance threshold\n dim_idx = np.where(self.pca_cum_var >= self.pca_var_threshold)[0][0]\n return dim_idx + 1\n # end if/else pca_var_threshold is exactly 1 or not\n # end method _choose_num_pca_dims\n\n\n def create_frame_0(self, dat_raw=None, _num_pca_dims=None):\n '''\n Create frame 0 from raw data.\n\n If dat_raw is passed in, we return the created frame 0. Otherwise, we use self.raw_data and store the\n created frame 0 in self.frames.\n\n If self.pca_transform is True, frame 0 will be the PCA-rotated/truncated coordinates of each row.\n Otherwise, frame 0 will simply be the raw data.\n\n Note: if dat_raw is passed in and pca_transform is True, we apply the 'in-sample' PCA rotation/truncation\n derived originally from self.raw_data. It's important that any new 'out-of-sample' points be transformed\n using the in-sample PCA transformation. (For more detail, see the discussion of running new points in\n the user guide.)\n\n :param dat_raw: A raw-data matrix. 
if None, we use self.raw_data. default None.\n :param _num_pca_dims: ONLY USED BY INTERNAL CODE. Use self.pca_num_dims or self.pca_var_threshold instead.\n Default None.\n :return: If 'dat_raw' was passed in, we return frame 0. otherwise, we return None.\n '''\n\n if dat_raw is None:\n # do 'in-sample' setup based on self.raw_data, then create frame 0 and store it in self.frames\n\n assert self.raw_data is not None, 'must have raw data to build frame 0'\n assert self.frames is None or self.frames.shape[2] == 1, \\\n \"must not already have multiple frames when creating frame 0 (use clear_frames to clear frames)\"\n\n if self.pca_transform:\n assert _num_pca_dims is None, \"'_num_pca_dims' must not be passed in when building 'in-sample'\" \\\n \"version of frame 0 from self.raw_data (use self.pca_num_dims or\" \\\n \"self.pca_var_threshold)\"\n\n if self.pca_eigvecs is None:\n if self.verbose:\n print('running PCA...')\n self.run_pca()\n # end if PCA not run/stored yet\n\n _num_pca_dims = self._choose_num_pca_dims()\n # end if using PCA transformation\n\n # create frame 0 based on self.raw_data and store in self.frames\n self.frames = self.create_frame_0(self.raw_data, _num_pca_dims)\n\n return\n # end if using self.raw_data\n\n t0 = time()\n\n if self.pca_transform:\n # center the raw data (always using the 'in-sample' column means)\n assert dat_raw.shape[1] == self.raw_col_means.size, \"'dat_raw' must have the expected number of columns\"\n dat = dat_raw - self.raw_col_means\n\n # if _num_pca_dims is None, infer it from self.frames\n if _num_pca_dims is None:\n assert self.frames is not None and type(self.frames) is np.ndarray,\\\n \"'self.frames' must be an ndarray (when creating frame 0 for raw data passed in)\"\n _num_pca_dims = self.frames.shape[1]\n # end if pca_num_dims is None\n\n # rotate and truncate using the specified number of PCA dimensions\n if _num_pca_dims > self.pca_eigvals.size:\n if self.verbose:\n print('## WARNING: {} PCA dims requested, but only have {} -- using all {} PCA dimensions...'\n .format(_num_pca_dims, self.pca_eigvals.size, self.pca_eigvals.size))\n _num_pca_dims = self.pca_eigvals.size\n # end if too many PCA dims requested\n eigvecs = self.pca_eigvecs[:, :_num_pca_dims]\n # NOTE: numpy matrix multiplication seems to be really slow (sometimes?) for no good reason. if we\n # build each column of the final matrix separately and then cat them together, the whole thing goes\n # much faster.\n # 2FIX: IS THE PROBLEM THAT CHANGING DAT 'IN PLACE' IS CONFUSING THE NUMPY CALCULATIONS?\n # dat = dat @ eigvecs # this is (sometimes?) 
extremely slow\n new_dat = dat @ eigvecs[:, 0:1] # 0:1 indexing is to keep the column vector 2-D\n for dim_idx in range(1, eigvecs.shape[1]):\n new_dat = np.concatenate((new_dat, dat @ eigvecs[:, dim_idx:dim_idx + 1]), axis=1)\n dat = new_dat\n\n if self.verbose:\n print('using {} of {} PCA dimensions ({:.1f}% of total variance)'.\\\n format(_num_pca_dims, self.pca_eigvals.size, 100 * self.pca_cum_var[_num_pca_dims - 1]))\n else:\n # not doing PCA transformation -- just use raw data\n dat = dat_raw\n # end if/else doing PCA transformation or not\n\n # make frame 0 a 3-D array\n frame0 = dat[:, :, np.newaxis]\n\n t1 = time()\n if self.verbose and t1 - t0 >= self.min_report_time:\n print(\"created frame 0 in {} seconds\".format(round(t1 - t0)))\n\n return frame0\n # end method create_frame_0\n\n\n def clear_basis(self):\n '''\n Clear instance variables storing information about the basis, INCLUDING self.basis_size.\n\n Use this method to clear a basis when you want to return to the default behavior of using\n all rows as the basis.\n\n :return: None\n '''\n\n self.basis_size = None\n self.basis_row_nums = None\n self.non_basis_row_nums = None\n self.basis_rows = None\n # end method clear_basis\n\n\n def _set_basis(self, basis_row_nums=None):\n '''\n Set basis row nums and basis rows\n\n :param basis_row_nums: List of basis row numbers. If None, we use all rows as the basis.\n :return: None\n '''\n\n assert type(self.frames) is np.ndarray and self.frames.ndim == 3, 'must have frame 0 to set basis'\n\n if basis_row_nums is None:\n basis_row_nums = list(range(self.frames.shape[0])) # use all rows as the basis by default\n\n assert type(basis_row_nums) is list, 'basis_row_nums must be a list'\n assert len(basis_row_nums) > 0, 'basis_row_nums must have at least 1 row'\n assert min(basis_row_nums) >= 0 and max(basis_row_nums) < self.frames.shape[0] and \\\n len(basis_row_nums) <= self.frames.shape[0] and len(set(basis_row_nums)) == len(basis_row_nums), \\\n 'basis_row_nums must have valid, unique row numbers'\n\n self.basis_row_nums = copy(basis_row_nums)\n # find non-basis row numbers (using sets -- much faster than list comprehension for large number of rows)\n self.non_basis_row_nums = list(set(range(self.frames.shape[0])).difference(set(basis_row_nums)))\n self.basis_rows = np.copy(self.frames[basis_row_nums, :, 0]) # note: basis_rows is 2-D, not 3-D\n # end method _set_basis\n\n\n def build_operators(self, n_potential=None):\n '''\n Build the DQM operators and store them in the instance.\n\n If basis has not been set, we use all rows as the basis by default. (For large numbers of rows, this\n default will be unusably slow.)\n\n Note: the relative order of the basis rows is baked into the operators, so the relative ordering of\n the basis rows must not change later (when building frames).\n\n The operators are:\n\n * simt: Transpose of 'similarity' matrix, which converts state vectors from 'raw' basis of basis rows\n to orthonormal basis of eigenstates. Dimensions: .\n * xops: 3-D array, where slice i in 3rd dimension is the position-expectation operator matrix for data\n dimension i (in 2nd dimension in frames). Dimensions: .\n * exph: Complex-valued 'evolution' operator matrix (the exponentiated time-evolution Hamiltonian operator\n matrix). Dimensions: .\n\n :param n_potential: USED MAINLY FOR DEBUGGING AND SPEED TESTING. Use this number of rows to build\n the potential, starting from the first row. 
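The column-by-column product in `create_frame_0` works around the matmul slowdown flagged in its comments. A plausible (unverified, as the code's own 2FIX note suggests) explanation is operand non-contiguity after the in-place centering; if that is the cause, the loop could collapse to:

```python
import numpy as np

# assumption: the slowdown comes from non-contiguous operands
dat = np.ascontiguousarray(dat) @ np.ascontiguousarray(eigvecs)
```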
If None, we use all rows (not just the basis rows).\n Default None.\n :return: None\n '''\n\n assert type(self.frames) is np.ndarray and self.frames.ndim == 3, 'must have frame 0 to build operators'\n assert self.frames.shape[2] == 1, \\\n \"must not already have multiple frames when building operators (use clear_frames to clear frames)\"\n\n if self.mass is None:\n self.mass = self.default_mass_for_num_dims()\n if self.verbose:\n print('mass was not set -- setting mass to {:.3f} for {} dimensions'\n .format(self.mass, self.frames.shape[1]))\n # end if verbose\n # envd if mass was None\n\n assert self.sigma > 0 and self.step > 0 and self.mass > 0, \\\n 'all parameters (self.sigma, self.step, self.mass) must be positive to build operators'\n\n if self.basis_row_nums is None:\n if self.verbose:\n print(f'basis was not set -- using full basis (all {self.frames.shape[0]} rows)')\n self._set_basis()\n\n if n_potential is None:\n n_potential = self.frames.shape[0] # use all rows to build the potential\n\n num_basis_rows = self.basis_rows.shape[0]\n\n if self.verbose:\n print('building operators for {:,} basis rows and {:,} potential rows...'.\\\n format(num_basis_rows, n_potential))\n\n if num_basis_rows < self.frames.shape[0]:\n # MakeOperatorsC expects the basis rows to be first in the matrix of rows, so reorder the rows\n shuffled_row_nums = self.basis_row_nums + self.non_basis_row_nums\n # note: shuffling the rows causes numpy to make a copy, which is what we want (since we need\n # mat's underlying memory to be contiguous)\n mat = self.frames[shuffled_row_nums, :, 0]\n else:\n mat = self.frames[:, :, 0]\n\n mat = np.ascontiguousarray(mat)\n\n t0 = time()\n\n if self.call_c:\n simt = np.zeros((num_basis_rows, num_basis_rows), dtype=np.float64)\n\n # set up xops so that the C code sees the allocated memory in C-friendly order (see note on 3-D\n # arrays at the top of this file). xops is supposed to be \n # (and will be, see below)\n xops = np.zeros((self.frames.shape[1], num_basis_rows, num_basis_rows), dtype=np.float64)\n\n exph = np.zeros((num_basis_rows, num_basis_rows), dtype=np.complex128)\n\n if dqm_lib is not None:\n num_basis_vecs = dqm_lib.MakeOperatorsC(mat, self.frames.shape[0], self.frames.shape[1],\n num_basis_rows, n_potential, self.sigma, self.step,\n self.mass, simt, xops, exph)\n else:\n raise RuntimeError(\"in DQM instance, 'call_c' is True but compiled-library code not found\")\n\n # reorder the dimensions to make xops \n # (note: we make the array contiguous here because it will be contiguous if we save it to disk and then\n # load it again. making it contiguous here keeps things consistent for later operations.)\n xops = np.ascontiguousarray(np.transpose(xops, (1, 2, 0)))\n\n # if the number of basis eigenstates is less than the number of basis rows, we need to subselect the\n # output arrays appropriately. 
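The transpose applied to `xops` after `MakeOperatorsC` is the layout trick from the "Note on Passing Arrays to C Code" above: allocate with the fastest-varying dimension last so the C side sees true row-major data, then permute back for Python. In miniature:

```python
import numpy as np

num_rows, num_cols, num_frames = 4, 3, 2
buf = np.zeros((num_frames, num_rows, num_cols))  # C-friendly: columns vary fastest
# ... compiled code fills buf frame by frame, row by row ...
frames = np.ascontiguousarray(np.transpose(buf, (1, 2, 0)))
assert frames.shape == (num_rows, num_cols, num_frames)
```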
(note: we need to make copies when subselecting to keep the underlying\n # memory contiguous.)\n if num_basis_vecs < num_basis_rows:\n if self.verbose:\n print('number of eigenstates ({}) is less than number of basis rows ({}) -- \\\n subselecting operators...'.format(num_basis_vecs, num_basis_rows))\n # simt should be \n simt = np.copy(simt[:num_basis_vecs, :])\n # xops should be \n xops = np.copy(xops[:num_basis_vecs, :num_basis_vecs, :])\n # exph should be \n exph = np.copy(exph[:num_basis_vecs, :num_basis_vecs])\n # end if subselecting for num_basis_vecs\n else:\n simt, xops, exph = make_operators_python(mat, num_basis_rows, n_potential,\n self.sigma, self.step, self.mass)\n # end if/else calling C or Python\n\n t1 = time()\n if self.verbose and t1 - t0 >= self.min_report_time:\n print(\"built operators in {} seconds\".format(round(t1 - t0)))\n\n self.simt, self.xops, self.exph = simt, xops, exph\n # end method build_operators\n\n\n def choose_basis_by_distance(self):\n '''\n Choose and store a set of basis rows, based on Euclidean distance, and store the results.\n\n self.basis_size must be set to a positive number less than the number of rows in self.frames (which\n must exist).\n\n First basis row: if self.basis_start_with_outlier is True, we use the largest outlier (with the farthest\n nearest neighbor) as the first basis row. Otherwise, we choose a random row as the first basis row.\n\n Subsequent basis rows: we choose the non-basis row whose closest distance to any current basis row is\n largest, until the desired basis size is reached.\n\n If self.basis_num_chunks is > 1, we partition all rows into 'chunks' and choose basis rows separately for\n each chunk. (This is faster but less 'accurate', since 2 basis rows in 2 different chunks may be arbitrarily\n close to each other.)\n\n :return: None\n '''\n\n # if using multiple chunks, shuffle the rows to remove any bias in row ordering\n shuffle_rows = self.basis_num_chunks > 1\n # if shuffling the rows or choosing a random start row, we'll need a random generator\n randomizing = shuffle_rows or not self.basis_start_with_outlier\n\n assert type(self.frames) is np.ndarray and self.frames.ndim == 3, 'must have frame 0 to choose basis'\n frame0 = self.frames[:, :, 0]\n\n basis_row_nums = []\n num_rows = self.frames.shape[0]\n num_chunks = self.basis_num_chunks\n basis_size = self.basis_size\n\n assert basis_size is not None and round(basis_size) == basis_size,\\\n \"'self.basis_size' must be an integer value\"\n basis_size = int(basis_size)\n assert self.basis_size > 0 and self.basis_size < num_rows,\\\n 'desired basis size must be positive and less than number of rows to choose basis'\n\n if self.verbose:\n print('choosing {:,} basis rows by distance...'.format(basis_size))\n\n rng = None\n if randomizing:\n rng = np.random.default_rng(self.basis_rand_seed)\n\n if shuffle_rows:\n shuffled_row_idxs = rng.permutation(num_rows)\n frame0 = frame0[shuffled_row_idxs, :]\n\n # set up chunk row numbers\n num_per_chunk = int(np.ceil(num_rows / num_chunks))\n start_idxs = [num_per_chunk * i for i in range(num_chunks)]\n end_idxs = [min(num_per_chunk * (i + 1), num_rows) for i in range(num_chunks)]\n\n # set up chunk basis sizes\n chunk_basis_size = int(np.ceil(self.basis_size / num_chunks))\n chunk_basis_sizes = [chunk_basis_size for i in range(num_chunks)]\n if num_chunks > 1:\n # tweak basis size for last chunk to make total come out right\n chunk_basis_sizes[-1] = basis_size - sum(chunk_basis_sizes[:-1])\n\n t0 = time()\n\n # choose basis 
rows for each chunk\n for chunk_idx in range(num_chunks):\n chunk_basis_size = chunk_basis_sizes[chunk_idx]\n chunk_row_nums = list(range(start_idxs[chunk_idx], end_idxs[chunk_idx]))\n chunk_rows = frame0[chunk_row_nums, :]\n chunk_basis_idxs = self._choose_basis_by_distance_single_chunk(chunk_rows, chunk_basis_size, rng)\n # convert back to original (possibly shuffled) row numbers\n chunk_basis_row_nums = [chunk_row_nums[i] for i in chunk_basis_idxs]\n basis_row_nums += chunk_basis_row_nums\n # end for each chunk\n\n if shuffle_rows:\n # return to original row numbers\n basis_row_nums = shuffled_row_idxs[basis_row_nums].tolist()\n\n t1 = time()\n if self.verbose and t1 - t0 >= self.min_report_time:\n if num_chunks > 1:\n print('chose {} basis rows (in {} chunks) in {} seconds'.\\\n format(basis_size, num_chunks, round(t1 - t0)))\n else:\n print('chose {} basis rows in {} seconds'.format(basis_size, round(t1 - t0)))\n\n self._set_basis(basis_row_nums)\n # end method choose_basis_by_distance\n\n\n def _choose_basis_by_distance_single_chunk(self, rows, basis_size, rng):\n '''\n Choose basis_size number of rows from rows to act as a basis.\n\n See comments for choose_basis_by_distance for more details.\n\n :param rows: Matrix of rows\n :param basis_size: Number of rows to select for the basis\n :param rng: Numpy random-number generator\n :return: List of selected basis row numbers\n '''\n\n num_rows, num_cols = rows.shape\n assert basis_size < num_rows, 'desired basis size must be smaller than number of rows in chunk'\n\n if self.basis_start_with_outlier:\n # sentinel value, which tells later code to start with the largest outlier\n first_basis_row_num = -1\n else:\n # choose random row number as the first basis row\n assert rng is not None, 'must have random generator to start basis chunk with random row'\n first_basis_row_num = rng.integers(num_rows)\n\n if self.call_c:\n basis_row_nums = np.zeros(basis_size, dtype=np.int32)\n if dqm_lib is not None:\n dqm_lib.ChooseBasisByDistanceC(rows, num_rows, num_cols, basis_size, basis_row_nums,\n first_basis_row_num)\n else:\n raise RuntimeError(\"in DQM instance, 'call_c' is True but compiled-library code not found\")\n # end if/else have compiled-library instance or not\n else:\n basis_row_nums = choose_basis_by_distance_python(rows, basis_size, first_basis_row_num)\n # end if/else calling C or Python\n\n return basis_row_nums.tolist()\n # end method _choose_basis_by_distance_single_chunk\n\n\n def build_overlaps(self, rows=None, row_nums=None, sigma=None, batch_size=int(100e3), verbose=None):\n '''\n Build basis overlaps for a given set of rows.\n\n If 'rows' is passed in, we build overlaps for those rows. Otherwise, if row_nums is passed in,\n we build overlaps for those rows. Otherwise, we build overlaps for all non-basis rows.\n\n 'Overlap' is a measure of how well a given data point is represented by the basis. Basis points will\n all have an overlap of 1, meaning perfect representation. (For technical details, see the section on\n \"Reconstruction of Wave Functions in the Eigenbasis\" in the technical-summary document \"Understanding\n DQM\".)\n\n :param rows: 2-D array of data rows. Takes precedence if not None. Default None.\n :param row_nums: List of row numbers, used if rows is None. Default None.\n :param sigma: Value of sigma. If None, we use self.sigma. Default None.\n :param batch_size: Number of rows in a batch. (For very large numbers of rows, memory management can\n become an issue.) 
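# A hedged pure-NumPy sketch of the distance-based basis selection described
# above. The real implementations are ChooseBasisByDistanceC and
# choose_basis_by_distance_python; this mirrors only the documented rule --
# repeatedly add the row whose distance to its nearest chosen basis row is
# largest -- and omits chunking and the outlier-start option.
import numpy as np

def _farthest_point_basis(rows, basis_size, first_idx=0):
    chosen = [first_idx]
    # distance from every row to its nearest chosen basis row so far
    min_dists = np.linalg.norm(rows - rows[first_idx], axis=1)
    while len(chosen) < basis_size:
        next_idx = int(np.argmax(min_dists))  # farthest from all chosen rows
        chosen.append(next_idx)
        min_dists = np.minimum(min_dists,
                               np.linalg.norm(rows - rows[next_idx], axis=1))
    return chosen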
Default 100,000.\n :param verbose: Boolean: if not None, overrides self.verbose. Default None.\n :return: Vector containing scalar overlap value for each row.\n '''\n\n t0 = time()\n\n if verbose is None:\n verbose = self.verbose\n\n assert self.basis_row_nums is not None and self.basis_rows is not None, \\\n 'must have basis to build overlaps'\n\n if rows is None:\n if row_nums is None:\n # build overlaps for all non-basis rows by default\n assert len(self.non_basis_row_nums) > 0, \\\n \"must have some non-basis rows in order to build overlaps for them\"\n row_nums = self.non_basis_row_nums\n assert min(row_nums) >= 0 and max(row_nums) < self.frames.shape[0], \\\n 'must have valid row numbers to build overlaps'\n rows = self.frames[row_nums, :, 0]\n # end if/else (rows, row nums, or neither passed in)\n\n assert type(rows) is np.ndarray and (rows.ndim == 2 or (rows.ndim == 3 and rows.shape[2] == 1)), \\\n \"'rows' must be 2-D ndarray (or 3-D with 1 slice in dim 3)\"\n if rows.ndim == 3:\n rows = rows[:, :, 0] # make it 2-D\n\n if sigma is None:\n sigma = self.sigma\n assert sigma is not None and sigma > 0, 'must have positive value of sigma to build overlaps'\n\n num_rows = rows.shape[0]\n num_basis_rows = self.basis_rows.shape[0]\n\n assert self.frames.shape[1] == rows.shape[1], \\\n \"'rows' must have correct number of columns to build overlaps\"\n\n # a very large number of rows (more than a few million) causes Windows errors -- not sure why.\n # so, we run in batches.\n batch_size = int(batch_size)\n num_batches = int(np.ceil(num_rows / batch_size))\n\n if num_batches == 1:\n if self.call_c:\n overlaps = np.zeros(num_rows, dtype=np.float64)\n if dqm_lib is not None:\n dqm_lib.BuildOverlapsC(sigma, self.basis_rows, rows, num_basis_rows, num_rows,\n self.frames.shape[1], overlaps)\n else:\n raise RuntimeError(\"in DQM instance, 'call_c' is True but compiled-library code not found\")\n # end if/else have compiled-library instance or not\n else:\n overlaps = build_overlaps_python(sigma, self.basis_rows, rows)\n # end if/else calling C or Python\n else:\n overlaps = np.zeros(0) # empty vector\n for batch_idx in range(num_batches):\n start_idx = batch_size * batch_idx\n end_idx = min(num_rows, batch_size * (batch_idx + 1))\n batch_overlaps = self.build_overlaps(rows=rows[start_idx:end_idx, :], sigma=sigma,\n batch_size=batch_size, verbose=verbose)\n overlaps = np.concatenate((overlaps, batch_overlaps))\n # end for each batch\n # end if/else multiple batches or not\n\n t1 = time()\n if verbose and t1 - t0 >= self.min_report_time:\n print(\"built {:,} overlaps in {} seconds\".format(num_rows, round(t1 - t0)))\n\n return overlaps\n # end method build_overlaps\n\n\n def estimate_mean_row_distance(self, rel_err_threshold=0.01, rand_seed=500):\n '''\n Estimate the mean pairwise distance between rows in frame 0. (self.frames must exist.)\n\n Use a successively larger number of row pairs to estimate the overall mean distance, until the\n 'relative error' (standard error of the mean divided by the mean) drops below rel_err_threshold.\n\n The final result is stored in self.mean_row_distance.\n\n :param rel_err_threshold: Threshold for 'relative error' (standard error of mean divided by mean).\n Must be positive. Default 0.01.\n :param rand_seed: Random seed for choosing row pairs for distance calculations. 
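# A toy illustration of the stopping rule documented above: keep sampling until
# the standard error of the mean divided by the mean (the 'relative error')
# drops below a threshold. The exponential draws are a hypothetical stand-in
# for pairwise row distances.
import numpy as np
_rng = np.random.default_rng(0)
_samples = []
_rel_err = np.inf
while _rel_err > 0.01:
    _samples.append(_rng.exponential(scale=5.0))
    if len(_samples) > 1:
        _mu = np.mean(_samples)
        _rel_err = np.std(_samples) / np.sqrt(len(_samples)) / _mu
# _mu is now a roughly 1%-accurate estimate of the true mean (5.0)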
Default 500.\n :return: None\n '''\n\n assert type(self.frames) is np.ndarray, 'frame 0 must exist to estimate mean distance between rows'\n assert rel_err_threshold > 0, f\"'rel_err_threshold must be positive', is currently {rel_err_threshold}\"\n\n rows = self.frames[:, :, 0]\n num_rows = rows.shape[0]\n\n rng = np.random.default_rng(rand_seed)\n shuffled_row_nums = rng.permutation(num_rows)\n\n # dists array will grow as needed (see below)\n dists_array_size = num_rows\n dists = np.zeros(dists_array_size)\n\n done = False\n row_idx1 = 0\n row_idx2 = 1\n num_pairs = 0\n while not done:\n # calculate store row-pair distance\n num_pairs += 1\n dists[num_pairs - 1] = np.linalg.norm(rows[shuffled_row_nums[row_idx1], :] -\n rows[shuffled_row_nums[row_idx2], :])\n\n # update row-pair indices\n row_idx2 += 1\n if row_idx2 == num_rows:\n row_idx1 += 1\n if row_idx1 == num_rows - 1:\n break # we've run out of row pairs\n else:\n row_idx2 = row_idx1 + 1\n # end if reached the end of the shuffled list of rows for row_idx2\n\n # calculate current relative error\n if num_pairs > 1:\n mu = np.mean(dists[:num_pairs])\n std = np.std(dists[:num_pairs])\n rel_err = std / np.sqrt(num_pairs) / mu\n done = rel_err <= rel_err_threshold\n # end if have multiple row-pair distances for calculations\n\n if num_pairs == dists.size and not done:\n # grow dists array as needed\n dists_array_size += num_rows\n new_dists = np.zeros(dists_array_size)\n new_dists[:num_pairs] = dists\n dists = new_dists\n # end if growing dists array\n # end while not done\n\n if self.verbose:\n report_precision = floor(log10(mu)) - 2\n print('estimated mean distance between rows is {:.{}f} (relative error {:.1f}%, from {:,} row pairs)'.\n format(mu, max(0, -report_precision), 100 * rel_err, num_pairs))\n\n self.mean_row_distance = mu\n\n return None\n # end method estimate_mean_row_distance\n\n\n def choose_sigma_for_basis(self, batch_size=None, num_batches_to_test=10, rand_seed=11):\n '''\n Choose the smallest value of sigma that meets overlap-threshold requirements (self.overlap_min_threshold\n and self.overlap_mean_threshold) for non-basis rows.\n\n self must already have frame 0 (in self.frames) and a selected basis that is smaller than the total number\n of rows (i.e., not a 'full' basis).\n\n We set self.sigma to the final resulting value of sigma.\n\n Default values for the arguments are good enough in most cases. (The 'batch' mode is for handling large\n numbers of non-basis rows more efficiently.)\n\n :param batch_size: Number of rows in a single batch. if None, test all non-basis rows at once.\n Default None.\n :param num_batches_to_test: Number of batches that must return the same value of sigma before\n we're done (if batch_size is not None). Default 10.\n :param rand_seed: random seed for shuffling the order of non-basis row numbers. Default 11.\n :return: None\n '''\n\n '''\n Batch logic\n\n * Using rand_seed, shuffle all non-basis rows into a random order.\n * If batch_size is None, pass all non-basis rows to _choose_sigma_for_rows in a single batch.\n * If batch_size is not None, call _choose_sigma_for_rows 1 batch at a time, continuing as long as\n the returned value of sigma is the same for every batch. 
When a mismatch occurs, increase the\n batch size by 25% and start over.\n * When num_batches_to_test batches all return the same value of sigma, we have our final selected value.\n '''\n\n assert self.basis_rows.shape[0] < self.frames.shape[0],\\\n 'must have some non-basis rows to choose sigma for basis'\n\n # shuffle all non-basis row numbers\n row_nums = np.array(self.non_basis_row_nums)\n num_rows = row_nums.size\n rng = np.random.default_rng(rand_seed)\n shuffled_row_nums = row_nums[rng.permutation(num_rows)]\n\n if batch_size is None:\n # if no batch size given, test all non-basis rows at once\n self.sigma = self._choose_sigma_for_rows(shuffled_row_nums)\n return\n # end if no batch size (testing all non-basis rows at once)\n\n done = False\n while not done:\n batch_size = int(batch_size)\n num_batches = int(np.ceil(num_rows / batch_size))\n num_batches_to_test = min(num_batches_to_test, num_batches)\n mismatch = False\n sigma = None\n\n if self.verbose:\n print('##### to choose sigma, testing {:,} batches (batch size {:,}) #####'.\n format(num_batches_to_test, batch_size))\n\n for batch_idx in range(num_batches_to_test):\n start_row_idx = batch_size * batch_idx\n end_row_idx = min(num_rows, batch_size * (batch_idx + 1))\n batch_row_nums = shuffled_row_nums[start_row_idx:end_row_idx]\n\n batch_sigma = self._choose_sigma_for_rows(batch_row_nums, verbose=False)\n if self.verbose:\n print('batch {}: sigma is {}'.format(batch_idx, batch_sigma))\n if sigma is None:\n sigma = batch_sigma\n else:\n if sigma != batch_sigma:\n if self.verbose:\n print(\"sigma values don't agree -- increasing batch size...\")\n mismatch = True\n batch_size *= 1.25\n break\n # end if/else first batch or not\n # end for each test batch\n\n done = not mismatch\n # end while not done\n\n self.sigma = sigma\n # end method choose_sigma_for_basis\n\n\n def _choose_sigma_for_rows(self, row_nums, verbose=None):\n '''\n For the given row_nums, use a binary search to find the smallest value of sigma that meets\n overlap-threshold requirements (self.overlap_min_threshold and self.overlap_mean_threshold).\n\n self must already have frame 0 (in self.frames) and a selected basis.\n\n We determine precision of the search as follows:\n\n * Estimate mean distance between rows by calling self.estimate_mean_row_distance (if needed).\n * Set precision at least 2 orders of magnitude below the mean distance:\n precision = 10 ** (floor(log10(mean_distance)) - 2).\n For example, if mean distance is 20, precision will be 0.1.\n * Precision determines the smallest allowed step from one value of sigma to the next.\n\n The first search value for sigma is 10 * precision. If that first value of sigma is good, we divide\n sigma by 2 until we find a bad value. (If the largest bad value of sigma is below the current\n precision level, we increase precision as needed.) If that first value of sigma is bad, we multiply\n sigma by 2 until we find a good value. Once we have a bad value and a good value, we proceed by binary\n search, until we have a bad value and a good value within one precision step of each other. The larger,\n good, value is the final selected value of sigma.\n\n :param row_nums: List/array of row numbers to use for testing overlaps (must not include any basis\n row numbers).\n :param verbose: Boolean: if not None, overrides self.verbose. 
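# A generic sketch of the search strategy documented above: halve/double a
# trial value until a known-bad and a known-good value bracket the boundary,
# then bisect down to the precision step. 'is_good' is a hypothetical stand-in
# for the overlap-threshold test; the precision-increase refinement described
# above is omitted, and at least one bad value is assumed to exist.
from math import floor, log10

def _smallest_good_value(is_good, mean_distance):
    precision = 10 ** (floor(log10(mean_distance)) - 2)
    x = 10 * precision
    lo, hi = 0.0, 0.0  # largest known-bad value, smallest known-good value
    while lo == 0.0 or hi == 0.0:
        if is_good(x):
            hi, x = x, x / 2
        else:
            lo, x = x, x * 2
    while hi - lo > precision:
        mid = (lo + hi) / 2
        if is_good(mid):
            hi = mid
        else:
            lo = mid
    return hi

# e.g. _smallest_good_value(lambda s: s >= 0.37, 20) returns 0.375 --
# within one 0.1-precision step of the true boundary at 0.37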
Default None.\n :return: Final value of sigma.\n '''\n\n if verbose is None:\n verbose = self.verbose\n\n # intersect row_nums and self.basis_row_nums, make sure they're disjoint\n basis_row_nums = np.array(list(set(row_nums).intersection(set(self.basis_row_nums))))\n assert basis_row_nums.size == 0, \"must not have any basis row numbers in 'row_nums'\"\n\n rows = self.frames[row_nums, :, 0]\n\n if self.mean_row_distance is None:\n self.estimate_mean_row_distance()\n # precision is at least 2 orders of magnitude below mean distance\n precision = 10 ** (floor(log10(self.mean_row_distance)) - 2)\n\n # use 'epsilon' to avoid machine-precision issues in comparisons\n eps = precision / 1e4\n\n # don't allow precision to shrink more than another 3 orders of magnitude (so, still 10 times\n # bigger than epsilon)\n min_precision = (precision / 1e3) - eps\n\n # starting value of sigma\n sigma = 10 * precision\n\n # do binary search within this range, once both of these values are non-zero\n sigma_range = [0, 0]\n\n if verbose:\n print('choosing sigma to precision of {:.1e} for basis of size {:,}...'.\n format(precision, self.basis_rows.shape[0]))\n\n done_searching = False\n while not done_searching:\n # test current value of sigma\n overlaps = self.build_overlaps(rows=rows, sigma=sigma)\n min_overlap = np.min(overlaps)\n mean_overlap = np.mean(overlaps)\n sigma_is_good = min_overlap >= self.overlap_min_threshold and mean_overlap >= self.overlap_mean_threshold\n if verbose:\n print('for sigma = {:.{}f}: min overlap {:.3f}, mean overlap {:.3f}{}'.\n format(sigma, max(0, round(-log10(precision))), min_overlap, mean_overlap,\n ' (GOOD)' if sigma_is_good else ''))\n\n if sigma_is_good:\n # search for smaller sigma (so, current sigma now defines the top of the search range)\n sigma_range[1] = sigma\n doing_binary_search = sigma_range[0] > 0\n if not doing_binary_search:\n # if we're at minimum sigma for current precision, we need to increase precision\n if sigma < precision + eps:\n precision /= 10\n assert precision >= min_precision, \\\n 'precision must not go below {:.1e}'.format(min_precision)\n if verbose:\n print('increasing precision to {:.1e}...'.format(precision))\n # end if increasing precision\n sigma = round(sigma / 2, round(-log10(precision))) # search for smaller sigma\n assert sigma > 0, 'sigma must always be positive'\n else:\n # search for larger sigma (so, current sigma now defines the bottom of the search range)\n sigma_range[0] = sigma\n doing_binary_search = sigma_range[1] > 0\n if not doing_binary_search:\n sigma = round(sigma * 2, round(-log10(precision))) # search for larger sigma\n # end if/else searching for smaller or larger sigma\n\n if doing_binary_search:\n sigma = round(np.mean(sigma_range), round(-log10(precision)))\n assert sigma > 0, 'sigma must always be positive'\n\n # to be done, top and bottom of search range must both be positive, and search range must be\n # at or below current precision\n done_searching = sigma_range[0] > 0 and sigma_range[1] > 0 and \\\n sigma_range[1] - sigma_range[0] < precision + eps\n # end while not done searching\n\n sigma = sigma_range[1]\n if verbose:\n print('final sigma is {:.{}f}'.format(sigma, max(0, round(-log10(precision)))))\n\n return sigma\n # end method _choose_sigma_for_rows\n\n\n def _stopped_row_nums(self, frames=None):\n '''\n For the given set of frames, return a list of row numbers for rows that have stopped (according to\n self.stopping_threshold).\n\n :param frames: A 3-D array of frames. 
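# A tiny worked example of the stopping test implemented below: a row is
# 'stopped' when its movement between the last two frames falls below the
# threshold. All numbers here are hypothetical.
import numpy as np
_frames = np.zeros((3, 2, 2))    # 3 rows x 2 cols x 2 frames
_frames[:, :, 1] = [[0.0, 0.0],  # row 0: no movement -> stopped
                    [1e-9, 0.0], # row 1: sub-threshold movement -> stopped
                    [0.5, 0.5]]  # row 2: still moving
_dists = np.linalg.norm(_frames[:, :, -1] - _frames[:, :, -2], axis=1)
assert [i for i in range(3) if _dists[i] < 1e-6] == [0, 1]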
If None, we use self.frames (which must exist). Default None.\n :return: List of stopped row numbers.\n '''\n\n if frames is None:\n assert self.frames is not None, 'must have frames in order to determine stopped row numbers'\n return self._stopped_row_nums(self.frames)\n # end if frames not passed in\n\n assert type(frames) is np.ndarray and frames.ndim in [2, 3], \"'frames' must be a 2-D or 3-D ndarray\"\n\n assert self.stopping_threshold is not None, 'must have stopping_threshold to determine stopped rows'\n\n if frames.ndim < 3 or frames.shape[2] <= 1:\n return [] # need 2 frames to determine stopping\n\n last_deltas = frames[:, :, -1] - frames[:, :, -2]\n dists = np.linalg.norm(last_deltas, axis=1)\n num_rows = frames.shape[0]\n stopped_row_nums = [i for i in range(num_rows) if dists[i] < self.stopping_threshold]\n return stopped_row_nums\n # end method _stopped_row_nums\n\n\n def set_stopping_threshold(self):\n '''\n Set self.stopping_threshold to self.mean_row_distance / 1e6. (First call self.mean_row_distance, if needed.)\n\n :return: None.\n '''\n\n if self.mean_row_distance is None:\n self.estimate_mean_row_distance()\n self.stopping_threshold = self.mean_row_distance / 1e6\n if self.verbose:\n print('set stopping threshold to {:.2e}'.format(self.stopping_threshold))\n # end method set_stopping_threshold\n\n\n def build_frames(self, num_frames_to_build=100, frames=None, pare_frames=True, verbose=None):\n '''\n Build new frames in a DQM evolution and concatenate them with existing frames.\n\n Instance must have basis rows, operators, and positive sigma.\n\n If 'frames' is passed in, we return all frames (old and new together). otherwise, we set\n self.frames to be all frames (old and new together) and return None.\n\n Stopped rows are not evolved further. (A row is 'stopped' when it fails to move at least\n self.stopping_threshold distance from one frame to the next.)\n\n :param num_frames_to_build: Number of new frames to build. Default 100.\n :param frames: 3-D array of existing frames (). If None, we use\n self.frames. Default None.\n :param pare_frames: Boolean: if True, we delete any final frames where nothing is changing. Default True.\n :param verbose: Boolean: if not None, overrides self.verbose. Default None.\n :return: If 'frames' was passed in, we return all frames (old and new together). 
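# A small demo of the 'fill stopped rows forward' pattern used in the method
# body below: repeat the current frame across the new-frame axis, then
# overwrite only the rows that are still evolving. Shapes and values are made
# up for illustration.
import numpy as np
_current = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])   # 3 rows x 2 cols
_new_evolving = np.zeros((2, 2, 4))                         # rows 0 and 2, 4 new frames
_filled = np.repeat(_current[:, :, np.newaxis], 4, axis=2)  # 3 x 2 x 4
_filled[[0, 2], :, :] = _new_evolving                       # stopped row 1 is untouched
assert np.all(_filled[1, :, :] == 2.0)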
Otherwise,\n we return None.\n '''\n\n if verbose is None:\n verbose = self.verbose\n\n if frames is None:\n self.frames = self.build_frames(num_frames_to_build, self.frames, pare_frames, verbose=verbose)\n return\n # end if using self.frames\n\n assert type(frames) is np.ndarray and frames.ndim == 3, \"'frames' must be a 3-D ndarray\"\n assert num_frames_to_build > 0, \"'num_frames_to_build' must be positive\"\n assert self.sigma is not None and self.sigma > 0, 'sigma must be positive'\n assert self.basis_rows is not None, 'must have basis rows to build frames'\n assert self.simt is not None and self.xops is not None and self.exph is not None, \\\n 'must have operators to build frames'\n\n num_rows, num_cols = frames.shape[:2]\n current_frame = np.copy(frames[:, :, -1]) # make a copy to be sure memory is contiguous\n\n assert num_cols == self.frames.shape[1], \"'frames' must have correct number of columns\"\n\n if self.stopping_threshold is None:\n self.set_stopping_threshold()\n\n # deal with stopped rows\n stopped_row_nums = self._stopped_row_nums(frames)\n not_stopped_row_nums = list(set(list(range(num_rows))).difference(set(stopped_row_nums)))\n if len(not_stopped_row_nums) == 0:\n if verbose:\n print('all rows have stopped -- no frames added')\n if pare_frames:\n frames = self.pare_frames(frames)\n return frames\n # end if all rows stopped\n num_evolving_rows = len(not_stopped_row_nums)\n\n have_stopped_rows = len(stopped_row_nums) > 0\n if have_stopped_rows:\n current_frame = current_frame[not_stopped_row_nums, :]\n\n t0 = time()\n\n if self.call_c:\n # set up new_frames so that the C code sees the allocated memory in C-friendly order (just as we\n # did with xops in build_operators, which see). new_frames is supposed to be\n # (and will be, see below)\n new_frames = np.zeros((num_frames_to_build, num_evolving_rows, num_cols), dtype=np.float64)\n\n # shuffle the xops data into C-friendly order (see note at top of file)\n xops = np.ascontiguousarray(np.transpose(self.xops, (2, 0, 1)))\n\n num_basis_vecs = self.exph.shape[0]\n if dqm_lib is not None:\n dqm_lib.BuildFramesAutoC(new_frames, num_evolving_rows, num_cols, num_frames_to_build, current_frame,\n self.basis_rows, self.basis_rows.shape[0], self.simt, num_basis_vecs,\n xops, self.exph, self.sigma, self.stopping_threshold)\n else:\n raise RuntimeError(\"in DQM instance, 'call_c' is True but compiled-library code not found\")\n # end if/else have compiled-library instance or not\n\n # make new_frames \n # (note: new_frames will now not be C_CONTIGUOUS, but everything else we're going to do with\n # new_frames is here in Python, so we don't care about the underlying memory order anymore,\n # we just let numpy handle it.)\n new_frames = np.transpose(new_frames, (1, 2, 0))\n else:\n # call the Python-only version\n new_frames = build_frames_python(num_frames_to_build, current_frame, self.basis_rows, self.simt,\n self.xops, self.exph, self.sigma, self.stopping_threshold)\n # end if/else calling C or Python\n\n t1 = time()\n if verbose and t1 - t0 >= self.min_report_time:\n print(\"built {} frames in {} seconds\".format(num_frames_to_build, round(t1 - t0)))\n\n if have_stopped_rows:\n # fill the stopped rows forward\n new_frames_all = frames[:, :, -1][:, :, np.newaxis] # current frame (unsubselected, 3-D)\n new_frames_all = np.repeat(new_frames_all, num_frames_to_build, axis=2)\n # overwrite where we have new data for evolving rows\n new_frames_all[not_stopped_row_nums, :, :] = new_frames\n else:\n new_frames_all = new_frames\n # end 
if/else any stopped rows or not\n\n frames = np.concatenate((frames, new_frames_all), axis=2)\n\n if pare_frames:\n frames = self.pare_frames(frames)\n\n return frames\n # end method build_frames\n\n\n def build_frames_auto(self, batch_size=100, frames=None, pare_frames=True, max_num_frames=int(1e4)):\n '''\n Add new frames in batches (by calling build_frames) until all rows have stopped.\n\n If 'frames' is passed in, we return all frames (old and new together). Otherwise, we set\n self.frames to be all frames (old and new together) and return None.\n\n :param batch_size: Number of new frames to add in each batch. Default 100.\n :param frames: 3-D array of frames (). If None, we use self.frames.\n Default None.\n :param pare_frames: Boolean: if True, we delete any final frames where nothing is changing. Default True.\n :param max_num_frames: Maximum number of frames, including any initial frames. Default 10,000. (This\n parameter is important because a too small value of mass can cause data points to oscillate around a\n minimum -- overshooting the minimum at each step -- meaning that they will never stop moving.)\n :return: If 'frames' was passed in, we return all frames (old and new together). Otherwise,\n we return None.\n '''\n\n if frames is None:\n self.frames = self.build_frames_auto(batch_size, self.frames, pare_frames, max_num_frames)\n return\n # end if using self.frames\n\n assert type(frames) is np.ndarray and frames.ndim == 3, \"'frames' must be a 3-D ndarray\"\n assert self.sigma is not None and self.sigma > 0, 'sigma must be positive'\n\n if self.stopping_threshold is None:\n self.set_stopping_threshold()\n assert self.stopping_threshold >= 1e-10 * self.sigma, \\\n 'stopping_threshold must be >= 1e-10 * sigma (to prevent build_frames_auto from running forever)'\n\n num_frames_start = frames.shape[2]\n\n t0 = time()\n\n num_frames1 = -1\n num_frames2 = 0\n while num_frames2 > num_frames1:\n if self.verbose:\n print(f'adding {batch_size} frames...')\n num_frames1 = frames.shape[2]\n frames = self.build_frames(batch_size, frames, pare_frames=False, verbose=False)\n num_frames2 = frames.shape[2]\n if num_frames2 >= max_num_frames:\n if self.verbose:\n print(f'WARNING: have reached or exceeded max num frames of {max_num_frames}\\\n (current num frames is {num_frames2})')\n break\n # end while still adding frames\n\n if pare_frames:\n frames = self.pare_frames(frames)\n\n t1 = time()\n\n if self.verbose:\n num_frames_end = num_frames2\n print(f'added a total of {num_frames_end - num_frames_start} frames in {round(t1 - t0)} seconds')\n\n return frames\n # end method build_frames_auto\n\n\n def pare_frames(self, frames):\n '''\n Drop any duplicate frames (where frame n + 1 is identical to frame n) at the end of an evolution.\n\n :param frames: A 3-D array of frames. 
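# A quick check of the paring behavior documented above, using a toy evolution
# whose last three frames are identical. Relies only on pare_frames from this
# class; the data is made up.
import numpy as np
_frames = np.zeros((2, 2, 6))
for _k in range(4):
    _frames[:, :, _k] = _k   # frames 0..3 all differ
_frames[:, :, 4] = 3.0       # frames 4 and 5 duplicate frame 3
_frames[:, :, 5] = 3.0
assert DQM().pare_frames(_frames).shape == (2, 2, 4)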
No default.\n :return: A pared 3-D array of frames (which will be a reference to the frames passed in if no\n frames were dropped).\n '''\n\n assert type(frames) is np.ndarray and frames.ndim == 3, \"'frames' must be a 3-D array\"\n num_frames = frames.shape[2]\n\n if num_frames <= 1:\n return frames\n\n # use binary search to find the last time a pair of consecutive frames differs\n start_idx = 0\n end_idx = num_frames - 1\n done = False\n while not done:\n if end_idx - start_idx == 1:\n done = True\n start_same_as_end = np.array_equal(frames[:, :, start_idx], frames[:, :, end_idx])\n if start_same_as_end:\n keep_idx = start_idx\n else:\n keep_idx = end_idx\n else:\n # indices are more than 1 apart\n mid_idx = round((start_idx + end_idx) / 2)\n mid_same_as_end = np.array_equal(frames[:, :, mid_idx], frames[:, :, end_idx])\n if mid_same_as_end:\n end_idx = mid_idx # search downward (all frames in top half assumed to be the same)\n else:\n start_idx = mid_idx # search upward (all frames in bottom half assumed to be different)\n # end if/else indices only 1 apart or not\n # end while doing binary search\n\n return frames[:, :, :keep_idx + 1]\n # end method pare_frames\n\n\n def clear_frames(self, keep_frame_0=True):\n '''\n Keep frame 0 and clear all frames in self.frames after frame 0.\n\n (Note that create_frame_0 and build_operators will both fail if self.frames has multiple frames in 3rd dim.\n This is to prevent accidental loss of information, particularly since building frames can be slow.)\n\n :param keep_frame_0: Boolean: if False, we set self.frames to None. default True.\n :return: None\n '''\n\n if self.frames is None:\n if self.verbose:\n print(\"'self.frames' is not an array -- no frames to clear\")\n elif not keep_frame_0:\n self.frames = None\n else:\n # 'reset' to frame 0\n self.frames = self.frames[:, :, 0:1] # 0:1 indexing is to keep frame 0 as 3-D array\n # end method clear_frames\n\n\n def pca_projection(self, dat_raw=None, num_pca_dims=None):\n '''\n Apply PCA 'projection' (centering + rotation + truncation) to a raw data matrix, as follows:\n\n * Center the columns by subtracting self.raw_col_means (which must exist).\n * Create a rotated and truncated matrix by applying the combination of self.pca_eigvecs (rotation) and\n the number of PCA dimensions being used (truncation). (Number of PCA dimensions can be specified via\n the num_pca_dims parameter. If num_pca_dims is None, we infer the number of PCA dimensions being used\n from the 2nd dimension of self.frames, which must then exist.)\n * Calculate the proportional norms for each row in the raw data -- meaning, the centered/rotated/truncated\n L2 norm divided by the centered-only L2 norm.\n\n The instance must have stored PCA results.\n\n Importantly, if dat_raw is passed in, we apply the 'in-sample' PCA projection based on the original\n self.raw_data, *not* based on this new raw data. (For more detail, see the discussion of running new\n points in the user guide.)\n\n :param dat_raw: 2-D raw-data matrix. If None, we use self.raw_data. Default None.\n :param num_pca_dims: Number of PCA dimensions to use in the projection. If None, we infer from the\n number of columns in 2nd dimension of self.frames (which must then exist). default None.\n :return: A vector of proportional norms (projected / original) for each row.\n '''\n\n '''\n 2FIX: add checks/warnings for this (probably very unlikely?) 
corner case\n\n if, after centering the data cloud, a data point is exactly at (or within machine precision of) the\n origin, then calculations here wil fail: either the original L2 norm will actually be zero (producing\n a divide-by-zero error), or the proportion of norms for this point will be dominated by noise.\n \n it's a little easier to imagine this case coming up in a scenario involving discrete data (e.g., many\n binary dimensions)...\n '''\n\n if dat_raw is None:\n dat_raw = self.raw_data\n assert type(dat_raw) is np.ndarray and dat_raw.ndim == 2, \"'dat_raw' must be a 2-D ndarray\"\n\n # always use in-sample column means\n assert self.raw_col_means is not None, 'must have raw column means'\n assert self.raw_col_means.size == dat_raw.shape[1], \"'dat_raw' must have correct number of columns\"\n\n if num_pca_dims is None:\n assert self.frames is not None and type(self.frames) is np.ndarray and self.frames.ndim == 3,\\\n \"must have 'self.frames' to infer number of PCA dimensions being used\"\n num_pca_dims = self.frames.shape[1]\n # end if num_pca_dims is None\n\n t0 = time()\n\n dat = dat_raw - self.raw_col_means\n\n # get original row norms (after centering)\n norms_orig = np.linalg.norm(dat, axis=1)\n\n # get rotated/truncated row norms\n dat_rotated = dat @ self.pca_eigvecs[:, :num_pca_dims]\n norms_rotated = np.linalg.norm(dat_rotated, axis=1)\n\n norm_props = norms_rotated / norms_orig\n\n # error check: there should be no loss of information in the 'full' rotation (with no subspace\n # truncation/projection)\n #\n # NOTE: this actually isn't always true. if the number of points/rows is less than the number of raw\n # dimensions when PCA is first run -- for example, if there are 20 points in 30 raw dimensions -- then\n # there will only be 20 PCA dimensions in total. these 20 PCA dimensions are enough to fully describe\n # the initial 20 points, but they are not enough to fully describe any arbitrary new point in the 30\n # raw dimensions (that would, of course, require 30 dimensions).\n dat_rotated_full = dat @ self.pca_eigvecs\n norms_rotated_full = np.linalg.norm(dat_rotated_full, axis=1)\n norm_props_full = norms_rotated_full / norms_orig\n if self.verbose and np.min(norm_props_full) < 0.999:\n print('WARNING: minimum norm proportion in full PCA rotation (with no subspace projection) is {:.4f}'.\n format(np.min(norm_props_full)))\n\n t1 = time()\n if self.verbose and t1 - t0 >= self.min_report_time:\n print(\"calculated PCA-projection proportions in {} seconds\".format(round(t1 - t0)))\n\n return norm_props\n # end method pca_projection\n\n\n def run_new_points(self, dat_raw_oos):\n '''\n Given dat_raw_oos, which is a raw-data matrix of new ('out-of-sample') points:\n\n * Apply the 'in-sample' PCA projection (subtract in-sample column means, apply in-sample\n PCA rotation, and truncate to in-sample number of PCA dimensions being used).\n * Build basis overlaps for the new points.\n * Evolve the new out-of-sample points, using the in-sample map (that is, the stored DQM operators),\n to as many frames as currently exist in self.frames.\n\n Important note: for running new out-of-sample points to make sense, the new raw data must be preprocessed\n in exactly the same way that the original raw data was.\n\n :param dat_raw_oos: A 2-D raw-data matrix of new 'out-of-sample' points. 
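# A compact worked example of the 'proportional norm' quantity computed in
# pca_projection above: the L2 norm of a centered row after rotation and
# truncation, divided by its centered-only L2 norm. The identity 'eigenvector'
# matrix is a toy stand-in for the stored PCA results.
import numpy as np
_dat = np.array([[3.0, 4.0], [1.0, 0.0]])   # already centered, for simplicity
_eigvecs = np.eye(2)
_norms_orig = np.linalg.norm(_dat, axis=1)  # [5.0, 1.0]
_dat_trunc = _dat @ _eigvecs[:, :1]         # keep 1 of 2 dimensions
_norm_props = np.linalg.norm(_dat_trunc, axis=1) / _norms_orig
assert np.allclose(_norm_props, [0.6, 1.0]) # row 0 keeps 3/5 of its norm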
Must have the same number of\n columns as self.raw_data.\n :return: A tuple of:\n\n * frames_oos: 3-D array of out-of-sample evolved frames\n * overlaps_is: vector of in-sample basis overlaps (for all non-basis rows)\n * overlaps_oos: vector of out-of-sample basis overlaps\n * norm_props_is: vector of in-sample proportional norms (projected L2 norms divided by\n original L2 norms)\n * norm_props_oos: vector of out-of-sample proportional norms (projected L2 norms divided by\n original L2 norms)\n '''\n\n '''\n 2FIX: create parameter to specify number of frames to build for new points?\n * as many as in self.frames (current default)\n * as many frames as needed for new points to stop\n * explicitly specified number of frames\n\n 2FIX: add option where new points below specified thresholds for PCA-transformation proportional norms\n ('off the map') or basis overlaps ('in a blank spot on the map') are not evolved at all? (the fact that\n a low-overlap point ‘snaps’ closer to the basis points at the beginning of evolution is confusing and\n misleading. [ADDRESS THIS ISSUE MORE GENERALLY SOMEHOW?])\n '''\n\n assert type(dat_raw_oos) is np.ndarray and dat_raw_oos.ndim == 2, \\\n \"'dat_raw_oos' must be a 2-D ndarray\"\n assert dat_raw_oos.shape[1] == self.raw_data.shape[1], \\\n \"'dat_raw_oos must have the same number of columns as self.raw_data\"\n\n if self.pca_transform:\n assert type(self.raw_col_means) is np.ndarray and self.raw_col_means.ndim == 1, \\\n \"must have raw column means to run new out-of-sample points\"\n\n # do the in-sample PCA projection for both in-sample points and out-of-sample points\n norm_props_is = self.pca_projection()\n norm_props_oos = self.pca_projection(dat_raw_oos)\n\n if self.verbose and HAVE_PLT:\n # plot histograms of in-sample and out-of-sample subspace proportions\n plt.figure(figsize=(22, 8))\n ax1 = plt.subplot(1, 2, 1)\n ax2 = plt.subplot(1, 2, 2)\n label_font = {'size': 15}\n title_font = {'size': 17}\n num_bins = 50\n plt.axes(ax1)\n plt.hist(norm_props_is, bins=num_bins)\n plt.xlabel('subspace norm as proportion of original norm', fontdict=label_font)\n plt.ylabel('count', fontdict=label_font)\n plt.title('In-Sample Proportion of L2 Norms for PCA Subspace Projection', fontdict=title_font)\n plt.axes(ax2)\n plt.hist(norm_props_oos, bins=num_bins)\n plt.xlabel('subspace norm as proportion of original norm', fontdict=label_font)\n plt.ylabel('count', fontdict=label_font)\n plt.title('Out-of-Sample Proportion of L2 Norms for PCA Subspace Projection', fontdict=title_font)\n plt.show()\n # end if verbose\n else:\n norm_props_is = None\n norm_props_oos = None\n # end if using pca transformation\n\n frame0_oos = self.create_frame_0(dat_raw_oos)\n\n # build in-sample and out-of-sample basis overlaps\n full_basis = self.basis_rows.shape[0] == self.frames.shape[0]\n if full_basis:\n overlaps_is = np.ones(self.frames.shape[0], dtype=np.float64)\n else:\n overlaps_is = self.build_overlaps()\n overlaps_oos = self.build_overlaps(rows=frame0_oos)\n\n if self.verbose and HAVE_PLT:\n # plot histograms of in-sample and out-of-sample overlaps\n if full_basis:\n print('NOTE: full basis, no in-sample non-basis rows to evaluate -- all in-sample overlaps are 1')\n plt.figure(figsize=(22, 8))\n ax1 = plt.subplot(1, 2, 1)\n ax2 = plt.subplot(1, 2, 2)\n label_font = {'size': 15}\n title_font = {'size': 17}\n num_bins = 50\n plt.axes(ax1)\n plt.hist(overlaps_is, bins=num_bins)\n plt.xlabel('basis overlap', fontdict=label_font)\n plt.ylabel('count', fontdict=label_font)\n 
plt.title('Basis Overlaps for In-Sample Non-Basis Rows', fontdict=title_font)\n plt.axes(ax2)\n plt.hist(overlaps_oos, bins=num_bins)\n plt.xlabel('basis overlap', fontdict=label_font)\n plt.ylabel('count', fontdict=label_font)\n plt.title('Basis Overlaps for Out-of-Sample Rows', fontdict=title_font)\n plt.show()\n # end if verbose\n\n # run dqm evolution of out-of-sample points, using in-sample operators and parameter values\n num_frames_to_build = self.frames.shape[2] - 1\n frames_oos = self.build_frames(num_frames_to_build, frames=frame0_oos)\n\n return frames_oos, overlaps_is, overlaps_oos, norm_props_is, norm_props_oos\n # end method run_new_points\n\n\n def run_simple(self, dat_raw, sigma):\n '''\n Do a simplified full DQM 'run', as follows -- given dat_raw (raw-data matrix) and a value of sigma:\n\n * Store raw data and sigma in the instance.\n * Create and store frame 0 (using all PCA dimensions)\n * Build and store operators (using a full basis and default value of mass)\n * Build and store frames (using build_frames_auto) until all points stop moving\n\n Note: default behaviors can be overridden by setting relevant instance parameters before calling this method.\n\n For small data sets, doing simple runs with various values of sigma can be the quickest way to understand\n the landscape that DQM is revealing.\n\n :param dat_raw: Raw data (2-D matrix).\n :param sigma: Positive value for sigma.\n :return: None\n '''\n\n assert type(dat_raw) is np.ndarray and dat_raw.ndim == 2,\\\n \"'dat_raw' must be 2-D ndarray\"\n assert sigma > 0, \"'sigma' must be positive\"\n\n self.raw_data = dat_raw\n self.sigma = sigma\n\n self.create_frame_0()\n self.build_operators()\n self.build_frames_auto()\n # end method run_simple\n\n\n @classmethod\n def exists(cls, main_dir, sub_dir=None):\n '''\n Check whether main_dir contains a saved DQM instance. If sub_dir is not None, also check\n whether sub_dir contains saved DQM info.\n\n :param main_dir: Relative or absolute path to a folder. No default.\n :param sub_dir: Name of subdirectory (inside the 'main_dir' folder) for landscape-specific saved data.\n Default None.\n :return: Boolean: True if main_dir contains a saved DQM instance. (If sub_dir is not None,\n sub_dir must also have saved landscape-specific DQM info in order for us to return True.)\n '''\n\n member_path = os.path.join(main_dir, 'dqm_members')\n if not os.path.exists(member_path):\n return False\n elif sub_dir is None:\n return True\n else:\n sub_member_path = os.path.join(main_dir, sub_dir, 'dqm_members')\n return os.path.exists(sub_member_path)\n # end class method exists\n\n\n # 2FIX: IT'S BRITTLE THAT ANY CHANGES IN MEMBER VARIABLES MUST BE MADE MANUALLY HERE AS WELL\n # 2FIX: IF A MEMBER VARIABLE IS CLEARED (SET TO NONE) IN MEMORY AND THEN THE INSTANCE IS SAVED\n # TO DISK, AN OLD SAVED VERSION OF THAT MEMBER VARIABLE COULD STILL EXIST ON DISK, AND WOULD\n # THUS BE LOADED NEXT TIME, PUTTING THE INSTANCE IN AN INCONSISTENT STATE. ADD LOGIC TO DELETE\n # FILES ON DISK IF THE MEMBER IS NONE. (EXCEPT FOR RAW DATA, WHICH MIGHT NOT BE LOADED, JUST\n # FOR SPEED. 
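# A hedged end-to-end usage sketch tying together run_simple, save, exists and
# load as documented in this file. The directory names and the random data are
# hypothetical, and the run may be slow for larger inputs.
import numpy as np
_dqm = DQM()
_dqm.run_simple(np.random.default_rng(7).normal(size=(100, 4)), sigma=0.5)
_dqm.save('my_dqm_run', sub_dir='sigma_0p5')
assert DQM.exists('my_dqm_run', 'sigma_0p5')
_dqm2 = DQM.load('my_dqm_run', 'sigma_0p5', load_raw_data=False)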
THINK THIS LOGIC THROUGH MORE CAREFULLY...)\n def save(self, main_dir, sub_dir=None):\n '''\n Save an instance of the DQM class:\n\n * Save numpy arrays separately.\n * Pickle everything else in the instance.\n\n Things that are common to multiple landscapes (raw data, PCA results) are saved in main_dir, which can\n be an absolute or relative path to a folder.\n\n Things that are specific to a given landscape (basis, DQM parameters, operators, evolved frames) are\n saved in sub_dir, which is relative to main_dir (so, typically sub_dir is just a folder name)\n\n Both main_dir and sub_dir (if not None) are created if they do not exist.\n\n :param main_dir: Relative or absolute path to a folder. No default.\n :param sub_dir: Name of subdirectory (inside the 'main_dir' folder) for basis-specific saved data.\n Default None.\n :return: None\n '''\n\n t0 = time()\n\n if not os.path.exists(main_dir):\n os.makedirs(main_dir)\n if sub_dir:\n sub_dir = os.path.join(main_dir, sub_dir)\n if not os.path.exists(sub_dir):\n os.makedirs(sub_dir)\n\n ## things that are common to all landscapes (based on different bases, parameter values, and operators)\n if self.raw_data is not None:\n np.save(os.path.join(main_dir, 'raw_data.npy'), self.raw_data)\n if self.raw_col_means is not None:\n np.save(os.path.join(main_dir, 'raw_col_means.npy'), self.raw_col_means)\n if self.pca_eigvals is not None:\n np.save(os.path.join(main_dir, 'pca_eigvals.npy'), self.pca_eigvals)\n if self.pca_eigvecs is not None:\n np.save(os.path.join(main_dir, 'pca_eigvecs.npy'), self.pca_eigvecs)\n if self.pca_cum_var is not None:\n np.save(os.path.join(main_dir, 'pca_cum_var.npy'), self.pca_cum_var)\n if self.frames is not None:\n # save frame 0 only (still as 3-D array)\n np.save(os.path.join(main_dir, 'frame_0.npy'), self.frames[:, :, :1])\n members = {\n 'pca_transform': self.pca_transform,\n 'pca_num_dims': self.pca_num_dims,\n 'pca_var_threshold': self.pca_var_threshold,\n 'verbose': self.verbose,\n 'min_report_time': self.min_report_time,\n 'call_c': self.call_c,\n 'mean_row_distance': self.mean_row_distance\n }\n with open(os.path.join(main_dir, 'dqm_members'), 'wb') as pickle_file:\n pickle.dump(members, pickle_file)\n\n ## things that are specific to a given landscape (basis, parameter values, and operators)\n if sub_dir:\n if self.basis_rows is not None:\n np.save(os.path.join(sub_dir, 'basis_rows.npy'), self.basis_rows)\n if self.simt is not None:\n np.save(os.path.join(sub_dir, 'simt.npy'), self.simt)\n if self.xops is not None:\n np.save(os.path.join(sub_dir, 'xops.npy'), self.xops)\n if self.exph is not None:\n np.save(os.path.join(sub_dir, 'exph.npy'), self.exph)\n if self.frames is not None:\n np.save(os.path.join(sub_dir, 'frames.npy'), self.frames)\n members = {\n 'basis_num_chunks': self.basis_num_chunks,\n 'basis_rand_seed': self.basis_rand_seed,\n 'basis_row_nums': self.basis_row_nums,\n 'non_basis_row_nums': self.non_basis_row_nums,\n 'basis_size': self.basis_size,\n 'basis_start_with_outlier': self.basis_start_with_outlier,\n 'sigma': self.sigma,\n 'step': self.step,\n 'mass': self.mass,\n 'overlap_mean_threshold': self.overlap_mean_threshold,\n 'overlap_min_threshold': self.overlap_min_threshold,\n 'stopping_threshold': self.stopping_threshold\n }\n with open(os.path.join(sub_dir, 'dqm_members'), 'wb') as pickle_file:\n pickle.dump(members, pickle_file)\n # end if sub_dir is not None\n\n t1 = time()\n if self.verbose and t1 - t0 >= self.min_report_time:\n print(\"saved dqm instance in {} seconds\".format(round(t1 
- t0)))\n # end method save\n\n\n # 2FIX: IT'S BRITTLE THAT ANY CHANGES IN MEMBER VARIABLES MUST BE MADE MANUALLY HERE AS WELL\n @classmethod\n def load(cls, main_dir, sub_dir=None, load_raw_data=True, verbose=True):\n '''\n Load an instance of the DQM class from disk and return it.\n\n :param main_dir: Relative or absolute path to folder. No default.\n :param sub_dir: Name of subdirectory (inside the 'main_dir' folder) for landscape-specific saved data.\n Default None.\n :param load_raw_data: Boolean: if True, we load raw data. Set to False to save time if raw\n data is very large. Default True.\n :param verbose: Boolean: whether to report on various operations. Default True.\n :return: a DQM instance with data loaded from 'main_dir' (and from sub_dir, if not None).\n '''\n\n t0 = time()\n\n assert cls.exists(main_dir), f\"dir '{main_dir}' must be a saved dqm instance\"\n if sub_dir:\n sub_dir_name = sub_dir\n sub_dir = os.path.join(main_dir, sub_dir)\n assert os.path.exists(os.path.join(sub_dir, 'dqm_members')), \\\n f\"sub dir '{sub_dir_name}' must exist and have saved dqm data\"\n # end if sub_dir is not None\n\n dqm = DQM()\n\n ## things that are common to all landscapes (raw data and PCA info)\n if load_raw_data:\n pth = os.path.join(main_dir, 'raw_data.npy')\n if os.path.exists(pth):\n dqm.raw_data = np.load(pth, allow_pickle=True)\n pth = os.path.join(main_dir, 'raw_col_means.npy')\n if os.path.exists(pth):\n dqm.raw_col_means = np.load(pth, allow_pickle=True)\n pth = os.path.join(main_dir, 'pca_eigvals.npy')\n if os.path.exists(pth):\n dqm.pca_eigvals = np.load(pth, allow_pickle=True)\n pth = os.path.join(main_dir, 'pca_eigvecs.npy')\n if os.path.exists(pth):\n dqm.pca_eigvecs = np.load(pth, allow_pickle=True)\n pth = os.path.join(main_dir, 'pca_cum_var.npy')\n if os.path.exists(pth):\n dqm.pca_cum_var = np.load(pth, allow_pickle=True)\n pth = os.path.join(main_dir, 'frame_0.npy')\n if os.path.exists(pth):\n dqm.frames = np.load(pth, allow_pickle=True)\n pth = os.path.join(main_dir, 'dqm_members')\n if os.path.exists(pth):\n with open(pth, 'rb') as pickle_file:\n members = pickle.load(pickle_file)\n dqm.pca_transform = members['pca_transform']\n dqm.pca_num_dims = members['pca_num_dims']\n dqm.pca_var_threshold = members['pca_var_threshold']\n dqm.verbose = members['verbose']\n dqm.min_report_time = members['min_report_time']\n dqm.call_c = members['call_c']\n dqm.mean_row_distance = members['mean_row_distance']\n # end with pickle file\n # end if members saved\n\n ## things that are specific to a given landscape (basis, parameter values, operators, evolved frames)\n if sub_dir:\n pth = os.path.join(sub_dir, 'basis_rows.npy')\n if os.path.exists(pth):\n dqm.basis_rows = np.load(pth, allow_pickle=True)\n pth = os.path.join(sub_dir, 'simt.npy')\n if os.path.exists(pth):\n dqm.simt = np.load(pth, allow_pickle=True)\n pth = os.path.join(sub_dir, 'xops.npy')\n if os.path.exists(pth):\n dqm.xops = np.load(pth, allow_pickle=True)\n pth = os.path.join(sub_dir, 'exph.npy')\n if os.path.exists(pth):\n dqm.exph = np.load(pth, allow_pickle=True)\n pth = os.path.join(sub_dir, 'frames.npy')\n if os.path.exists(pth):\n dqm.frames = np.load(pth, allow_pickle=True)\n pth = os.path.join(sub_dir, 'dqm_members')\n if os.path.exists(pth):\n with open(pth, 'rb') as pickle_file:\n members = pickle.load(pickle_file)\n dqm.basis_num_chunks = members['basis_num_chunks']\n dqm.basis_rand_seed = members['basis_rand_seed']\n dqm.basis_row_nums = members['basis_row_nums']\n dqm.non_basis_row_nums = 
members['non_basis_row_nums']\n dqm.basis_size = members['basis_size']\n dqm.basis_start_with_outlier = members['basis_start_with_outlier']\n dqm.sigma = members['sigma']\n dqm.step = members['step']\n dqm.mass = members['mass']\n dqm.overlap_mean_threshold = members['overlap_mean_threshold']\n dqm.overlap_min_threshold = members['overlap_min_threshold']\n dqm.stopping_threshold = members['stopping_threshold']\n # end with pickle file\n # end if members saved\n # end if sub_dir is not None\n\n t1 = time()\n if verbose and t1 - t0 >= cls.min_report_time:\n print(\"loaded dqm instance in {} seconds\".format(round(t1 - t0)))\n\n return dqm\n # end class method load\n\n# end class DQM\n\n","repo_name":"zanderteller/zt-rtd-test1","sub_path":"dqm/DQM.py","file_name":"DQM.py","file_ext":"py","file_size_in_byte":86739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12018284740","text":"import logging\nimport os\n\nMAYA_PROJECT_PATH = 'D:/BOULOT/TRAVAUX_PERSO/MAYA PROJECTS'\nMAX_PROJECT_PATH = 'D:/BOULOT/TRAVAUX_PERSO/3DSMAX PROJECTS'\n\n\ndef build_path(project, scenes_sound='', asset_anim='', asset_type='', asset='',\n task='', filename='', return_type='project'):\n \"\"\"\n Build path from given data.\n \n :param project: project to create path with\n :type project: str\n\n :param scenes_sound: type of scene or file to look for\n :type scenes_sound: str\n\n :param asset_anim: type of asset or animation to look for\n :type asset_anim: str\n \n :param asset_type: type to look for\n :type asset_type: str\n \n :param asset: asset to look for\n :type asset: str\n \n :param task: task to look for\n :type task: str\n \n :param filename: file to look for\n :type filename: str\n \n :param return_type: defines what type of path we want to output\n :type return_type: str\n \n :return: path to selected task/file\n :rtype: str\n \"\"\"\n\n logging.info(filename)\n # Build project path\n if return_type == 'project':\n return_path = '%s/%s' % (MAYA_PROJECT_PATH, project)\n\n elif return_type == 'directory':\n return_path = '%s/%s/%s/%s/%s/%s/%s' % (MAYA_PROJECT_PATH,\n project, scenes_sound,\n asset_anim, asset_type, asset,\n task)\n\n # Build file path\n elif return_type == 'file':\n return_path = '%s/%s/%s/%s/%s/%s/%s/%s' % (MAYA_PROJECT_PATH,\n project, scenes_sound,\n asset_anim, asset_type,\n asset, task, filename)\n\n # Build wip path\n elif return_type == 'wip':\n if filename == 'No file in this directory' \\\n or filename == '' \\\n or len(filename.split('_')) != 4 \\\n or build_increment(filename.split('_')[3]):\n wip_file = '%s_%s_%s_00.ma' % (asset_type, asset, task)\n else:\n # Split file name\n wip_file = filename.split('.')[0]\n logging.debug(wip_file)\n wip_file = wip_file.split('_')\n logging.debug(wip_file)\n # Increment version number\n wip_file[3] = build_increment(wip_file[3])\n logging.debug(wip_file[3])\n # Join publish file name\n wip_file = '_'.join(wip_file)\n logging.debug(wip_file)\n # Build path\n return_path = '%s/%s/%s/%s/%s/%s/%s/%s' % (MAYA_PROJECT_PATH,\n project, scenes_sound,\n asset_anim, asset_type,\n asset, task, wip_file)\n\n # Build publish path\n else:\n # Split file name\n publish_file = filename.split('_')\n # Remove increment and extension\n publish_file = publish_file[:3]\n logging.debug(publish_file)\n # Append PUBLISH plus extension\n publish_file.append('PUBLISH.ma')\n logging.debug(publish_file)\n # Join publish file name\n publish_file = '_'.join(publish_file)\n logging.debug(publish_file)\n # Build 
path\n return_path = '%s/%s/%s/%s/%s/%s/%s' % (MAYA_PROJECT_PATH,\n project, scenes_sound,\n asset_anim, asset_type, asset,\n publish_file)\n\n # print return_path\n return return_path\n\n\n# TODO : remove this\ndef build_increment(number, digits=3):\n \"\"\"\n Increment the given number and return it as a 2 decimal string (ie : 01, 02, etc.)\n :param number: number you want to increment\n :type number: str\n\n :param digits: number of digits the string should contain in total\n :type digits: int\n\n :return: incremented number\n :rtype: str\n \"\"\"\n try:\n # Set it as an integer\n increment = int(number)\n # Increment\n increment += 1\n # List it\n increment = str(increment).zfill(digits)\n\n except ValueError:\n increment = None\n logging.error('\"%s\" is not a number' % number)\n\n return increment\n\n\ndef create_subdir_list(given_path):\n \"\"\"\n create list of the subdirectories in the given directory\n \n :param given_path: directory to list subdirectories in\n :type given_path: str\n \n :return: list of the subdirectories\n :rtype: list\n \"\"\"\n # print 'Listing sub directories in %s' % given_path\n # List all the directories at the given path\n subdir_list = [sub_path for sub_path in os.listdir(given_path)\n if os.path.isdir(given_path+'/'+sub_path)]\n\n # Removing mayaSwatches, keyboard and edits\n subdir_list = [directory for directory in subdir_list\n if directory != '.mayaSwatches'\n and directory != 'Keyboard'\n and directory != 'edits']\n\n # Returning list\n return subdir_list\n\n\ndef build_files_list(given_path):\n \"\"\"\n Create a list of all the files in the given directory\n \n :param given_path: path to the directory you want to list the files in\n :type given_path: str\n \n :return: all the files in that directory\n :rtype: list\n \"\"\"\n # Set current directory to the given path\n os.chdir(given_path)\n # Filter files\n files = [dir_file for dir_file in os.listdir(given_path)\n if os.path.isfile(os.path.join(given_path, dir_file))]\n # Filter maya files\n maya_files = [maya_file for maya_file in files\n if '.ma' in maya_file\n or '.mb' in maya_file\n or '.fbx' in maya_file]\n # If no maya files\n if not maya_files:\n # list is used as verbose\n maya_files = ['No file in this directory']\n # If there are maya files\n else:\n # Sort them\n maya_files.sort(key=lambda x: os.path.getmtime(x))\n # Get most recent in first\n maya_files.reverse()\n\n return maya_files\n\n","repo_name":"hapaxe/Python","sub_path":"mla_GeneralPipe/mla_file_utils/mla_path_utils.py","file_name":"mla_path_utils.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27524749385","text":"#!/usr/bin/python3\nimport os\nimport sys\nimport subprocess\nimport json\nimport project\n\n\ncurrent_file = os.path.abspath(__file__)\nintegrate_dir = os.path.dirname(current_file)\nexample_dir = os.path.dirname(integrate_dir).replace(\" \", \"\\ \")\nexample_bin = os.path.join(example_dir, \"./bin\")\nexample_lib = os.path.join(example_dir, \"./lib\")\n\nypc_lib = os.path.join(project.ypc_lib_dir(), \"./\")\nypc_bin = os.path.join(ypc_lib, \"../bin\")\n\nkmgr_enclave = {\n 'stdeth': os.path.join(ypc_lib, \"keymgr.signed.so\"),\n 'gmssl': os.path.join(ypc_lib, \"keymgr_gmssl.signed.so\"),\n}\n\n\ndef execute_cmd(cmd):\n print(\"execute_cmd: {}\".format(cmd))\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n p.wait()\n if p.returncode != 0:\n raise RuntimeError('Failed to execute cmd 
{}'.format(cmd))\n return p.stdout.read().decode('utf-8', errors='ignore')\n\n\ndef fid_keymgr_create(user_id, crypto=\"stdeth\"):\n cmd = os.path.join(ypc_bin, \"./keymgr_tool\")\n cmd = cmd + \" --crypto {}\".format(crypto)\n param = {\"create\": \"\", \"user-id\": user_id}\n for k, v in param.items():\n cmd = cmd + \" --{} {}\".format(k, v)\n output = execute_cmd(cmd)\n return [cmd, output]\n\n\ndef fid_keymgr_list(crypto=\"stdeth\"):\n cmd = os.path.join(ypc_bin, \"./keymgr_tool\")\n cmd = cmd + \" --crypto {}\".format(crypto)\n output = execute_cmd(\"{} --list\".format(cmd))\n ls = output.split(\"\\n\")\n tkeyid = ''\n keys = {}\n\n for l in ls:\n l = l.strip()\n if l.startswith(\">> key \"):\n ks = l.split(\":\")\n tkeyid = ks[1].strip()\n if l.startswith(\"public key:\"):\n ks = l.split(\":\")\n pkey = ks[1].strip()\n if pkey.startswith(tkeyid):\n keys[tkeyid] = pkey\n\n return keys\n\n\ndef get_keymgr_private_key(keyid, crypto_type=\"stdeth\"):\n cmd = os.path.join(ypc_bin, \"./keymgr_tool\")\n cmd = cmd + \" --crypto {}\".format(crypto_type)\n output = execute_cmd(\"{} --list\".format(cmd))\n ls = output.split(\"\\n\")\n ks = ls[0].split(' ')\n dir_ = ks[len(ks) - 1]\n fp = os.path.join(dir_, keyid)\n info = {}\n with open(fp) as of:\n info = json.load(of)\n return info['private_key']\n\n\ndef fid_keymgr(**kwargs):\n cmd = os.path.join(ypc_bin, \"./keymgr_tool\")\n for k, v in kwargs.items():\n cmd = cmd + \" --{} {}\".format(k, v)\n output = execute_cmd(cmd)\n return [cmd, output]\n\n\ndef fid_data_provider(**kwargs):\n cmd = os.path.join(ypc_bin, \"./data_provider\")\n for k, v in kwargs.items():\n cmd = cmd + \" --{} {}\".format(k, v)\n output = execute_cmd(cmd)\n return [cmd, output]\n\n\ndef fid_dump(**kwargs):\n cmd = os.path.join(ypc_bin, \"./ydump\")\n for k, v in kwargs.items():\n cmd = cmd + \" --{} {}\".format(k, v)\n output = execute_cmd(cmd)\n return [cmd, output]\n\n\ndef fid_terminus(**kwargs):\n cmd = os.path.join(ypc_bin, \"./yterminus\")\n for k, v in kwargs.items():\n cmd = cmd + \" --{} {}\".format(k, v)\n output = execute_cmd(cmd)\n return [cmd, output]\n\n\ndef fid_analyzer(**kwargs):\n cmd = os.path.join(ypc_bin, \"./fid_analyzer\")\n cmd = \"GLOG_logtostderr=1 \" + cmd\n for k, v in kwargs.items():\n cmd = cmd + \" --{} {}\".format(k, v)\n output = execute_cmd(cmd)\n return [cmd, output]\n\n\ndef iris_data(**kwargs):\n cmd = os.path.join(ypc_bin, \"./iris_gen_classify_input\")\n for k, v in kwargs.items():\n cmd = cmd + \" --{} {}\".format(k, v)\n output = execute_cmd(cmd)\n return [cmd, output]\n\n\ndef iris_model(**kwargs):\n cmd = os.path.join(ypc_bin, \"./iris_gen_model\")\n for k, v in kwargs.items():\n cmd = cmd + \" --{} {}\".format(k, v)\n output = execute_cmd(cmd)\n return [cmd, output]\n","repo_name":"YeeZTech/YPC-algo-example","sub_path":"integrate/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24465199703","text":"\"\"\"Create a hangman game\"\"\"\nimport random\n\n\n\ndef letter_check(x):\n global word\n if x in word:\n print(f\"Yes, letter {x} is in the word\")\n else:\n print(\"Sorry. 
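
Aside: every fid_* wrapper in the common.py record above rebuilds the same "--key value" flag string from **kwargs. A factored-out sketch of that pattern; the shlex.quote call is an added safety measure, not something the original does:

```python
import shlex

# Build a CLI string from keyword arguments, as the fid_* helpers do.
def build_cli(binary, **kwargs):
    parts = [binary]
    for k, v in kwargs.items():
        parts.append("--{} {}".format(k, shlex.quote(str(v))))
    return " ".join(parts)

print(build_cli("./keymgr_tool", crypto="stdeth"))  # ./keymgr_tool --crypto stdeth
```
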
That letter is not in the word.\")\n\ndef word_check():\n global gl, word, blank\n for letter in word:\n if letter in gl:\n print(letter, end=\" \")\n else:\n print(blank, end=\" \")\n\nfin = open('wordlist.txt', 'r')\nwords = [line.strip() for line in fin]\nword = random.choice(words)\nblank = '_'\nguesses = 0\nletters = len(word)\nclue = len(word) * '_'\ngl = []\n\nprint(f\"\\nWelcome to Hangman by Kilgore Trout! Coming to you from another dimension.\\nYou will have 10 guesses to find the word.\\nYour word has {letters} letters.\")\nprint(clue)\n\nwhile guesses <= 9:\n s = input(\"\\nEnter your letter\\n>> \")\n if s in word:\n gl.append(s)\n guesses += 1\n letter_check(s)\n word_check()\n print(\"\\nYou have\", 10 - guesses,\"guesses left.\")\n\n wchars = set(word)\n wchars = list(wchars)\n wchars.sort()\n wchars = ''.join(wchars)\n gl.sort()\n\n if wchars == ''.join(gl):\n print(\"Congratulations. You win!!!\")\n raise SystemExit\n\n\nprint('Sorry mate you have run out of guesses. You lose.')\nprint(f'Your word was {word}')\n","repo_name":"npally/Hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73935916746","text":"from django.conf.urls import url\n\nfrom .views import create_master_course, update_master_course, delete_master_course\nfrom .views import create_scheduled_course, update_scheduled_course, delete_scheduled_course\nfrom .views import create_group, update_group, delete_group\n\napp_name = 'Programmes'\nurlpatterns = [\n url(r'^create/master/$', create_master_course, name='create_master_course'),\n url(r'^update/master/$', update_master_course, name='update_master_course'),\n url(r'^delete/master/$', delete_master_course, name='delete_master_course'),\n url(r'^create/scheduled/$', create_scheduled_course, name='create_scheduled_course'),\n url(r'^update/scheduled/$', update_scheduled_course, name='update_scheduled_course'),\n url(r'^delete/scheduled/$', delete_scheduled_course, name='delete_scheduled_course'),\n url(r'^create/group/$', create_group, name='create_group'),\n url(r'^update/group/$', update_group, name='update_group'),\n url(r'^delete/group/$', delete_group, name='delete_group'),\n]\n","repo_name":"rc3k/programmes-admin","sub_path":"urls_json_api.py","file_name":"urls_json_api.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"855193528","text":"from python_project_5 import *\r\nprint(\"Welcome to our Stores\")\r\nprint(\"---------------------------\")\r\nprint(\"* Upload details *\")\r\nprint(\"---------------------------\")\r\n\r\n\r\ndef showitem():\r\n a = list(Fdstores)\r\n for x in a:\r\n print(x)\r\n print(\"-----------------------------\")\r\n w = input(\"Enter the items : \")\r\n b = Fdstores[w].keys()\r\n for b in Fdstores[w]:\r\n print(b)\r\n\r\n\r\n\r\n\r\ndef stockitem():\r\n x = {\"product\": [], \"items\": [], \"quantity\": []}\r\n c = list(x.values())\r\n product = c[0]\r\n items = c[1]\r\n quantity = c[2]\r\n\r\n a = list(Fdstores)\r\n\r\n for x in a:\r\n print(x)\r\n print(\"---------------------------------------------\")\r\n b = input(\"Show the menu items : \")\r\n # print(list(Fdstores[b]))\r\n\r\n for y in Fdstores[b]:\r\n print(y)\r\n print(\"----------------------------------------------\")\r\n n = input(\"availabe item : \")\r\n print(Fdstores[b][n], \" : Stock 
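
Aside on the hangman record above: the win test canonicalizes the word's unique letters and the guess list by sorting and joining. A set comparison states the same condition directly, and it also sidesteps a subtle issue in the original, where gl keeps duplicates if the same correct letter is entered twice and the joined strings then diverge:

```python
word = "banana"
gl = ["a", "n", "b"]          # guessed letters, deduplicated
assert set(word) == set(gl)   # win condition as a set comparison
# Original style: canonicalize both sides by sorting unique letters
assert "".join(sorted(set(word))) == "".join(sorted(set(gl)))
```
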
available\")\r\n\r\ndef addstockitems():\r\n a = list(Fdstores)\r\n for x in a:\r\n print(x)\r\n print(\"-----------------------------\")\r\n\r\n w = input(\"Enter the items : \")\r\n b = Fdstores[w].keys()\r\n\r\n for b in Fdstores[w]:\r\n print(b)\r\n print(\"------------------------------\")\r\n k = list(Fdstores[w])\r\n print(k)\r\n d = input(\"enter the item to add : \")\r\n print(d)\r\n z=k.append(d)\r\n print(z)\r\n print(k)\r\n for j in k:\r\n print(j)\r\n print(\"------------------------------\")\r\n\r\n\r\n\r\n\r\nwhile True:\r\n try:\r\n print(\"1.Show items\\n2.Stockitems\\n3.addstockitems\\n4.Exit\")\r\n print(\"-------------------\")\r\n\r\n b = int(input(\"enter the choice : \"))\r\n\r\n except ValueError:\r\n continue\r\n else:\r\n if b == 1:\r\n showitem()\r\n print(\"-------------------\")\r\n elif b == 2:\r\n stockitem()\r\n print(\"---------------------\")\r\n elif b == 3:\r\n addstockitems()\r\n else:\r\n print(\"---Well done---\")\r\n quit()\r\n Fdstores.clear()\r\n","repo_name":"franklindinesh/Store-Details","sub_path":"upload stock.py","file_name":"upload stock.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7780891830","text":"import os\nimport tempfile\nimport subprocess\nfrom PreTeX import helper\nfrom PreTeX import helper\n\ndef setup_module(module):\n tmpdir = tempfile.mkdtemp()\n os.chdir(tmpdir)\n\nclass TestHelper:\n def test_extract_extracts(self):\n content = \"This is a test. I'm not a sk8er boi.\"\n regex = \"sk[1-9][aeoir]{2} [\\w]*\\.$\"\n\n result = helper.extract(content, regex)\n\n assert result == \"sk8er boi.\"\n\n def test_extract_returns_empty_sting_if_no_match(self):\n content = \"Hello. This is a test.\"\n regex = \"I will not match nothin'\"\n\n result = helper.extract(content, regex)\n\n assert result == \"\"\n\n def test_execute_executes(self, monkeypatch):\n class communicatemock:\n def communicate(*args):\n return 24.0, None\n def popenmock(*args, **kwargs):\n return communicatemock()\n\n monkeypatch.setattr(subprocess, 'Popen', popenmock)\n\n command = \"python\"\n args = \"dummy.py 3\"\n\n result = helper.execute([command, args])\n\n assert result[0] == \"24.0\"\n\n def test_helper_can_load_file(self):\n file_name = 'testefil'\n file_contents = 'tekst'\n file = open(file_name, 'w')\n file.write(file_contents)\n file.close()\n\n result = helper.load(file_name)\n\n assert result == file_contents\n\n def test_helper_can_write_file(self):\n file_name = 'testefil2'\n file_contents = 'tekst'\n\n helper.write(file_name,file_contents)\n\n with open(file_name, 'r') as f:\n read_contents = f.read()\n\n assert read_contents == file_contents\n","repo_name":"nicolaiskogheim/INF3331-Nicolai","sub_path":"oblig02/tests/unit/helper_test.py","file_name":"helper_test.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4570513933","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport os\r\n\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nimport ipdb\r\n\r\nimport utils\r\nimport pretrain_utils\r\nimport models\r\nimport layers\r\n\r\ndef fuse_feature(feature_list, fuse='last'): #get embedding feature from encoder\r\n\r\n if fuse == 'last':\r\n fused_feat = feature_list[-1]\r\n elif fuse == 'avg':\r\n fused_feat = torch.mean(torch.stack(feature_list))\r\n elif fuse == 
'concat':\r\n fused_feat = torch.cat(feature_list, dim=-1)\r\n\r\n return fused_feat\r\n\r\ndef cal_feat_dim(args): # get dimension of obtained embedding feature\r\n emb_dim = args.nhid\r\n if args.fuse == 'concat':\r\n emb_dim = emb_dim * args.enc_layer\r\n\r\n return emb_dim\r\n\r\n\r\nclass Trainer(object):\r\n def __init__(self, args, model, weight):#\r\n self.args = args\r\n\r\n self.in_dim = cal_feat_dim(args)\r\n self.loss_weight = weight\r\n self.models = []\r\n self.models.append(model)\r\n if args.model=='DISGAT':\r\n if args.residue:\r\n self.fuse1 = layers.FuseLayer(args, args.nhead, nfeat=args.nhid, residue=args.size)\r\n self.fuse2 = layers.FuseLayer(args, args.nhead, nfeat=args.nhid, residue=args.nhid)\r\n else:\r\n self.fuse1 = layers.FuseLayer(args, args.nhead, nfeat=args.nhid)\r\n self.fuse2 = layers.FuseLayer(args, args.nhead, nfeat=args.nhid)\r\n\r\n\r\n if args.cuda:\r\n self.fuse1.cuda()\r\n self.fuse2.cuda()\r\n self.models.append(self.fuse1)\r\n self.models.append(self.fuse2)\r\n\r\n self.models_opt = []\r\n for model in self.models:\r\n self.models_opt.append(optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay))\r\n\r\n def train_step(self, data, pre_adj):#pre_adj corresponds to adj used for generating ssl signal\r\n raise NotImplementedError('train not implemented for base class')\r\n\r\n def inference(self, data):\r\n raise NotImplementedError('infer not implemented for base class')\r\n\r\n def get_label_all(self, feature, adj):\r\n raise NotImplementedError('get_label_all not implemented for base class')\r\n\r\n def get_em(self, feature, adj):\r\n if self.args.model != 'DISGAT':\r\n output = self.models[0].get_em(feature, adj)\r\n else:\r\n fusers = [self.fuse1, self.fuse2]\r\n output = self.models[0].get_em(feature, adj, fusers)\r\n\r\n output = fuse_feature(output, fuse=self.args.fuse)\r\n\r\n return output\r\n\r\n def analyze_disentangle(self,feature,adj):\r\n # for analyze learned distribution of disentangled edge-aware channels\r\n #at_distance_list: [nlayers*1],obtain attention KL distance at each layer\r\n #correlation_graph_lists: [nlayers * (nheads*nheads)],obtain correlation between attention heads as grid graph at each layer\r\n #feat_dim_graph_lists = [nlayers * (nhid *nhid) ], obtain correlation between each dim pair as grid graph of each layer\r\n assert self.args.model == 'DISGAT', 'analyze disentanglement is only implemented for DISGAT'\r\n fusers = [self.fuse1, self.fuse2]\r\n\r\n feats = self.models[0].get_em(feature, adj, fusers) # nlayers * (n_node*nhid)\r\n\r\n #edge indices for comparing distribution of disentangled edges\r\n if True:\r\n if adj.is_sparse:\r\n adj_cand = adj.to_dense()\r\n else:\r\n adj_cand = adj\r\n adj_cand = (adj_cand!=0).int()\r\n label = (adj_cand!=0).float()\r\n mask = label.new(label.shape).fill_(0)\r\n #randomly select edge entries for training\r\n edge_ratio = adj_cand.sum()/(adj.shape[0]*adj.shape[0])\r\n random_ini_mask = (torch.rand(size=mask.shape) < edge_ratio.item()*3).int()\r\n mask[:] = random_ini_mask[:]\r\n \r\n indices = adj_cand.nonzero().cpu().numpy()\r\n edge_num = int(indices.shape[0]//3)\r\n np.random.shuffle(indices)\r\n pos_indices = indices[:edge_num]\r\n mask[pos_indices[:,0],pos_indices[:,1]] = 1\r\n\r\n indices = mask.nonzero().transpose(0,1) #(2,edge_num)\r\n\r\n if adj.is_sparse:\r\n adjs = self.models[0].predict_adjs_sparse(feature, adj, fusers, auxiliary_edges=indices) #nlayer *nhead*1 * (n_edges*1)\r\n else:\r\n adjs = self.models[0].get_adjs(feature, adj, 
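
Aside on fuse_feature at the top of this trainer record: 'concat' multiplies the feature dimension by the number of layers, which is why cal_feat_dim scales nhid by enc_layer, while the 'avg' branch calls torch.mean(torch.stack(...)) with no dim argument and therefore appears to reduce to a single scalar rather than an element-wise average. A quick shape check, with dim=0 supplied for the element-wise version:

```python
import torch

feats = [torch.randn(5, 16) for _ in range(3)]  # 3 layers, 5 nodes, 16 dims
assert feats[-1].shape == (5, 16)                          # 'last'
assert torch.cat(feats, dim=-1).shape == (5, 48)           # 'concat'
assert torch.stack(feats).mean(dim=0).shape == (5, 16)     # element-wise average
assert torch.mean(torch.stack(feats)).shape == ()          # record's 'avg': a scalar
```
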
fusers)\r\n\r\n ##compute the metrics\r\n at_cor_graph_lists = []\r\n at_distance_list = []\r\n feat_cor_graph_lists = []\r\n\r\n for layer in range(2):\r\n feat_cor_graph_lists.append(utils.group_correlation(feats[layer].transpose(0,1)))\r\n\r\n adj_layer = [adj[0] for adj in adjs[layer]]\r\n adj_layer = torch.stack(adj_layer).squeeze()\r\n at_cor_graph =utils.group_correlation(adj_layer)\r\n at_cor_graph_lists.append(at_cor_graph)\r\n \r\n at_distance_list.append(torch.mean(torch.abs(at_cor_graph)).item())\r\n\r\n return at_distance_list, at_cor_graph_lists, feat_cor_graph_lists\r\n\r\n def reg_fuser(self):\r\n l1_norm1 = sum(p.abs().sum() for p in self.fuse1.parameters())\r\n l1_norm2 = sum(p.abs().sum() for p in self.fuse2.parameters())\r\n reg_loss = self.args.reg_weight * (l1_norm1+l1_norm2)\r\n #print('reg_loss: {}'.format(reg_loss.item()))\r\n\r\n return reg_loss\r\n\r\n \r\n def train_batch(self, graphset, label, batch_id):#batch_id: indicating the batches being used\r\n raise NotImplementedError('train_batch not implemented for base class')\r\n \r\n\r\n#Node classification\r\nclass ClsTrainer(Trainer):\r\n def __init__(self, args, model, labels, weight=1.0):\r\n super().__init__(args, model, weight) \r\n\r\n \r\n self.classifier = models.MLP(in_feat=self.in_dim, hidden_size=args.nhid, out_size=labels.max().item() + 1, layers=args.cls_layer)\r\n if args.cuda:\r\n self.classifier.cuda()\r\n self.classifier_opt = optim.Adam(self.classifier.parameters(), lr=args.lr, weight_decay=args.weight_decay)\r\n \r\n self.models.append(self.classifier)\r\n self.models_opt.append(self.classifier_opt)\r\n\r\n #split train, test, valid for node classification\r\n self.idx_train, self.idx_val, self.idx_test, self.class_num_mat = utils.split(labels, train_ratio=args.node_sup_ratio)\r\n if args.cuda:\r\n self.idx_train = self.idx_train.cuda()\r\n self.idx_val = self.idx_val.cuda()\r\n self.idx_test = self.idx_test.cuda()\r\n\r\n '''\r\n tmp = self.idx_train\r\n self.idx_train = self.idx_test\r\n self.idx_test = tmp\r\n '''\r\n\r\n #print(self.class_num_mat)\r\n\r\n def train_step(self, data, labels, epoch):\r\n for i, model in enumerate(self.models):\r\n model.train()\r\n self.models_opt[i].zero_grad()\r\n\r\n feature, adj = data\r\n \r\n output = self.get_em(feature,adj)\r\n output = self.models[-1](output, cls=True)\r\n\r\n #ipdb.set_trace()\r\n loss_log = F.nll_loss(output[self.idx_train], labels[self.idx_train])\r\n acc_train = utils.accuracy(output[self.idx_train], labels[self.idx_train])\r\n loss_train = loss_log\r\n if self.args.model=='DISGAT':\r\n reg_log = self.reg_fuser()\r\n else:\r\n reg_log = loss_log\r\n if self.args.reg:\r\n loss_train = loss_log + reg_log\r\n\r\n loss_train = loss_train*self.loss_weight\r\n loss_train.backward()\r\n\r\n for opt in self.models_opt:\r\n opt.step()\r\n \r\n #ipdb.set_trace()\r\n\r\n loss_val = F.nll_loss(output[self.idx_val], labels[self.idx_val])\r\n acc_val = utils.accuracy(output[self.idx_val], labels[self.idx_val])\r\n #utils.print_class_acc(output[self.idx_val], labels[self.idx_val], self.class_num_mat[:,1])\r\n roc_val, macroF_val = utils.Roc_F(output[self.idx_val], labels[self.idx_val], self.class_num_mat[:,1])\r\n\r\n \r\n print('Epoch: {:05d}'.format(epoch+1),\r\n 'loss_train: {:.4f}'.format(loss_log.item()),\r\n 'loss_reg: {:.4f}'.format(reg_log.item()),\r\n 'acc_train: {:.4f}'.format(acc_train.item()),\r\n 'loss_val: {:.4f}'.format(loss_val.item()),\r\n 'acc_val: {:.4f}'.format(acc_val.item()))\r\n \r\n log_info = {'loss_train': 
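
Aside: train_step above coordinates several sub-models by zeroing every optimizer, backpropagating one combined loss, then stepping every optimizer, with F.nll_loss applied to log-probabilities. A minimal self-contained version of that loop structure:

```python
import torch
import torch.nn.functional as F

models = [torch.nn.Linear(4, 8), torch.nn.Linear(8, 3)]
opts = [torch.optim.Adam(m.parameters(), lr=1e-3) for m in models]

x, y = torch.randn(10, 4), torch.randint(0, 3, (10,))
for opt in opts:
    opt.zero_grad()
out = F.log_softmax(models[1](models[0](x)), dim=-1)
loss = F.nll_loss(out, y)   # expects log-probabilities, as in train_step
loss.backward()             # one backward pass for the combined graph
for opt in opts:
    opt.step()
```
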
loss_log.item(), 'acc_train': acc_train.item(), 'loss_reg': reg_log.item(),\r\n 'loss_val': loss_val.item(), 'acc_val': acc_val.item(), 'roc_val': roc_val, 'macroF_val': macroF_val }\r\n\r\n return log_info\r\n\r\n def train_batch(self, graphset, label, batch_id):\r\n if (batch_id+1)*self.args.batch_size >= len(self.idx_train):\r\n raise ValueError(\"idx out of boundary\")\r\n\r\n idx_used = np.arange(batch_id*self.args.batch_size, (batch_id+1)*self.args.batch_size)\r\n idx_batch = self.idx_train[idx_used]\r\n\r\n for i, model in enumerate(self.models):\r\n model.train()\r\n self.models_opt[i].zero_grad()\r\n\r\n feature, adj, _ = graphset.get_batch(idx_batch, self.args.SubgraphSize)\r\n \r\n\r\n output = self.get_em(feature,adj)\r\n output = self.models[-1](output, cls=True)\r\n\r\n loss_train = F.nll_loss(output[:,0], label[idx_batch]) * self.loss_weight\r\n acc_train = utils.accuracy(output[:,0], label[idx_batch])\r\n\r\n if self.args.reg:\r\n loss_train = loss_train + self.reg_fuser()\r\n\r\n loss_train.backward()\r\n\r\n for opt in self.models_opt:\r\n opt.step()\r\n\r\n log_info = {'loss_train': loss_train.item(), 'acc_train': acc_train.item()}\r\n \r\n print('batch: {:05d}'.format(batch_id),\r\n 'loss_train: {:.4f}'.format(loss_train.item()),\r\n 'acc_train: {:.4f}'.format(acc_train.item()))\r\n\r\n return log_info\r\n\r\n def test_batch(self, graphset, label, is_test=True):\r\n \r\n loss = []\r\n acc = []\r\n for i, model in enumerate(self.models):\r\n model.eval()\r\n if is_test:\r\n idx_used = self.idx_test\r\n else:\r\n idx_used = self.idx_val\r\n\r\n for i in range(len(idx_used)//self.args.batch_size):\r\n idx_batch = idx_used[i*self.args.batch_size:(i+1)*self.args.batch_size]\r\n\r\n feature, adj, _ = graphset.get_batch(idx_batch, self.args.SubgraphSize)\r\n \r\n\r\n output = self.get_em(feature,adj)\r\n output = self.models[-1](output, cls=True)\r\n\r\n\r\n #ipdb.set_trace()\r\n loss_test = F.nll_loss(output[:,0], label[idx_batch])\r\n acc_test = utils.accuracy(output[:,0], label[idx_batch])\r\n\r\n loss.append(loss_test.item())\r\n acc.append(acc_test.item())\r\n\r\n log_info = {'loss_test': sum(loss)/len(loss), 'acc_test': sum(acc)/len(acc)}\r\n \r\n print(\"Test set results:\",\r\n \"loss= {:.4f}\".format(sum(loss)/len(loss)),\r\n \"accuracy= {:.4f}\".format(sum(acc)/len(acc)))\r\n\r\n return log_info\r\n\r\n def test(self, data, labels, epoch = 0):\r\n for i, model in enumerate(self.models):\r\n model.eval()\r\n\r\n feature, adj = data\r\n \r\n output = self.get_em(feature, adj)\r\n\r\n output = self.models[-1](output, cls=True)\r\n\r\n loss_test = F.nll_loss(output[self.idx_test], labels[self.idx_test])\r\n acc_test = utils.accuracy(output[self.idx_test], labels[self.idx_test])\r\n\r\n print(\"Test set results:\",\r\n \"loss= {:.4f}\".format(loss_test.item()),\r\n \"accuracy= {:.4f}\".format(acc_test.item()))\r\n\r\n utils.print_class_acc(output[self.idx_test], labels[self.idx_test], self.class_num_mat[:,2], pre='test')\r\n \r\n roc_test, macroF_test = utils.Roc_F(output[self.idx_test], labels[self.idx_test], self.class_num_mat[:,2])\r\n \r\n log_info = {'loss_test': loss_test.item(), 'acc_test': acc_test.item(), 'roc_test': roc_test, 'macroF_test': macroF_test}\r\n\r\n return log_info\r\n\r\n\r\n#Edge prediction\r\nclass EdgeTrainer(Trainer):\r\n def __init__(self, args, model, labels, weight=1.0):\r\n super().__init__(args, model, weight) \r\n\r\n self.classifier = models.EdgePredictor(nfeat=self.in_dim, nhid=args.nhid, layers=args.EdgePred_layer)\r\n\r\n if 
args.cuda:\r\n self.classifier.cuda()\r\n\r\n self.classifier_opt = optim.Adam(self.classifier.parameters(), lr=args.lr, weight_decay=args.weight_decay)\r\n self.models.append(self.classifier)\r\n self.models_opt.append(self.classifier_opt)\r\n\r\n #split train, test, valid for node classification\r\n self.idx_train, self.idx_val, self.idx_test, self.class_num_mat = utils.split(labels, train_ratio=0.5)\r\n if args.cuda:\r\n self.idx_train = self.idx_train.cuda()\r\n self.idx_val = self.idx_val.cuda()\r\n self.idx_test = self.idx_test.cuda()\r\n\r\n def train_step(self, data, labels, epoch):\r\n for i, model in enumerate(self.models):\r\n model.train()\r\n self.models_opt[i].zero_grad()\r\n\r\n feature, adj = data\r\n if adj.is_sparse:\r\n adj_tgt = adj.to_dense()\r\n else:\r\n adj_tgt = adj\r\n adj_tgt = (adj_tgt !=0).float()\r\n\r\n output = self.get_em(feature, adj)\r\n output = self.models[-1](output)\r\n\r\n loss_train = utils.adj_mse_loss(output[self.idx_train,:][:,self.idx_train], adj_tgt[self.idx_train,:][:,self.idx_train])*self.loss_weight\r\n if self.args.reg:\r\n loss_train = loss_train + self.reg_fuser()\r\n loss_train.backward()\r\n\r\n for opt in self.models_opt:\r\n opt.step()\r\n\r\n loss_val = utils.adj_mse_loss(output[self.idx_val,:][:,self.idx_val], adj_tgt[self.idx_val,:][:,self.idx_val])*self.loss_weight\r\n\r\n output[output>0.5] = 1.0\r\n output[output<0.5] = 0.0\r\n acc_train, TP_train, TN_train = utils.adj_accuracy(output[self.idx_train,:][:,self.idx_train], adj_tgt[self.idx_train,:][:,self.idx_train])\r\n acc_val, TP_val, TN_val = utils.adj_accuracy(output[self.idx_val,:][:,self.idx_val], adj_tgt[self.idx_val,:][:,self.idx_val])\r\n\r\n '''\r\n print('Epoch: {:05d}'.format(epoch+1),\r\n 'loss_train: {:.4f}'.format(loss_train.item()),\r\n 'acc_train: {:.4f}'.format(acc_train.item()),\r\n 'acc_val: {:.4f}'.format(acc_val.item()),\r\n 'TP_train: {:.4f}'.format(TP_train.item()),\r\n 'TP_val: {:.4f}'.format(TP_val.item()),\r\n 'TN_train: {:.4f}'.format(TN_train.item()),\r\n 'TN_val: {:.4f}'.format(TN_val.item()))\r\n '''\r\n \r\n log_info = {'loss_train': loss_train.item(), 'acc_train': acc_train.item(),\r\n 'loss_val': loss_val.item(), 'acc_val': acc_val.item(),\r\n 'TP_train': TP_train.item(), 'TP_val': TP_val.item(),\r\n 'TN_train': TN_train.item(), 'TN_val': TN_val.item()}\r\n\r\n return log_info\r\n\r\n\r\n def test(self, data, labels, epoch = 0):\r\n for i, model in enumerate(self.models):\r\n model.eval()\r\n\r\n feature, adj = data\r\n if adj.is_sparse:\r\n adj_tgt = adj.to_dense()\r\n else:\r\n adj_tgt = adj\r\n adj_tgt = (adj_tgt !=0).float()\r\n\r\n output = self.get_em(feature, adj)\r\n output = self.models[-1](output)\r\n\r\n loss_test = utils.adj_mse_loss(output[self.idx_test,:], adj_tgt[self.idx_test,:])\r\n \r\n output[output>0.5] = 1.0\r\n output[output<0.5] = 0.0\r\n acc_test, TP_test, TN_test = utils.adj_accuracy(output[self.idx_test,:], adj_tgt[self.idx_test,:])\r\n\r\n\r\n print(\"Test set results:\",\r\n \"loss= {:.4f}\".format(loss_test.item()),\r\n \"accuracy= {:.4f}\".format(acc_test.item()),\r\n \"TP= {:.4f}\".format(TP_test.item()),\r\n \"TN= {:.4f}\".format(TN_test.item()))\r\n\r\n log_info = {'loss_test': loss_test.item(), 'acc_test': acc_test.item(), 'TP_test': TP_test.item(), 'TN_test': TN_test.item()}\r\n\r\n return 
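
Aside: the edge predictor above binarizes scores with two masked writes (output[output>0.5] = 1.0, output[output<0.5] = 0.0), which leaves values exactly equal to 0.5 untouched. A single comparison expresses the threshold without that edge case:

```python
import torch

scores = torch.tensor([[0.1, 0.7], [0.9, 0.4]])
hard = (scores > 0.5).float()   # every entry decided, including exact 0.5
assert torch.equal(hard, torch.tensor([[0.0, 1.0], [1.0, 0.0]]))
```
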
log_info","repo_name":"TianxiangZhao/EdgeDisentangle_SSL","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":16218,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"30998569484","text":"import re\nfrom lexer import PriToken\n\n\nclass Scanner():\n def __init__(self, path):\n # 读入文件位置\n self.path = path\n # 设置缓冲区\n self.text = \"\"\n with open(self.path, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n # 将文件中注释去掉\n self.text = self.text + \\\n line.split(\"//\")[0].split(\"--\")[0].split(\"\\n\")[0]\n self.text = self.text.upper().strip()\n self.lexer = PriToken.Lexer()\n self.output_lists = []\n\n def analyze(self):\n sentences = re.split(\"(;)\", self.text)\n # No.0\n # 识别\n # E->E;|ε\n # 用于记录状态机状态,当state == True时,意味着可以读入一个E,当state == False时,意味着可以读入一个;\n state = True\n for sentence in sentences:\n if state and sentence != \";\":\n state = False\n self.lexer.getToken(sentence)\n self.output_lists.extend(self.lexer.output_list)\n elif sentence == \";\":\n state = True\n else:\n raise SyntaxError()\n if state:\n raise SyntaxError()\n # No.0 识别结束\n return self.output_lists\n","repo_name":"otach1/SimFuncLanInterpreter","sub_path":"scanner/PriScan.py","file_name":"PriScan.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3971458998","text":"from sqlalchemy.orm import relationship\nfrom sqlalchemy.orm.relationships import foreign\nfrom db import db\n\n#Database Table Contents\nclass PositionTable(db.Model):\n __tablename__ = \"position\"\n\n id = db.Column(db.Integer, primary_key=True)\n position_name = db.Column(db.String(100),nullable=False)\n player_id = db.Column(db.Integer, db.ForeignKey('players.id')) #Relationship with Player Table (one to one)(Child Table = Position_Table , Parent Table = Player_Table)\n\n \n \n\n \n\n#Functions and Properties\nclass PositionAccess(object):\n @property\n def positions(self):\n return PositionTable.query.all()\n\n def get(self, id):\n position = PositionTable.query.filter_by(id=id).first()\n if position:\n return {\n \"id\" : PositionTable.id, \n \"position_name\" : PositionTable.position_name,\n \"player_id\" : PositionTable.player_id\n }\n else: print('Position not found')\n\n def create(self, data):\n position = PositionTable(\n position_name = data['position_name'],\n player_id = data['player_id']\n ) \n db.session.add(position)\n db.session.commit()\n return position\n\n def update(self, id, data):\n position = PositionTable.query.filter_by(id=id).first()\n position.position_name = data['position_name'],\n position.player_id = data['player_id']\n db.session.commit()\n return position\n\n def delete(self, id):\n position = PositionTable.query.filter_by(id=id).first()\n db.session.delete(position)\n db.session.commit()","repo_name":"Abdul-Ahad-ikhub/IPL-API","sub_path":"model/position.py","file_name":"position.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33343525878","text":"from typing import Union\n\nimport tensorflow # type: ignore\nimport tensorflow_datasets as tfds # type: ignore\nfrom tensorflow_datasets import Split\nfrom tqdm import tqdm\n\nfrom deeplake.core.dataset import Dataset\nimport deeplake\n\n\ndef from_tfds_to_path(\n tfds_dataset_name: str,\n split: Union[str, Split],\n deeplake_ds_path: str,\n batch_size: int = 
100,\n):\n    \"\"\"Converts the tfds dataset with name `tfds_dataset_name` into a Deep Lake dataset and saves it at `deeplake_ds_path`\n    Args:\n        tfds_dataset_name (str): Name of tfds dataset. You can see a list of all tfds datasets here:\n            https://www.tensorflow.org/datasets/catalog/overview\n        split (str, Split) : Used for dataset splits as defined here: https://www.tensorflow.org/datasets/splits\n        deeplake_ds_path (str): Path where new Deep Lake dataset will be created\n        batch_size (int): Batch size for tfds dataset. Has no effect on output, but may affect performance.\n    Returns:\n        A Deep Lake dataset\n    \"\"\"\n    tfds_ds = tfds.load(tfds_dataset_name, split=split).batch(batch_size)\n    ds = deeplake.dataset(deeplake_ds_path)\n\n    return from_tfds(tfds_ds=tfds_ds, ds=ds)  # type: ignore\n\n\ndef from_tfds(tfds_ds: tensorflow.data.Dataset, ds: Dataset):\n    \"\"\"Converts a tfds dataset to Deep Lake dataset\n    Args:\n        tfds_ds (tensorflow.data.Dataset): A tfds_dataset object.\n        ds (Dataset) : A Deep Lake dataset object where Tensor will be created.\n    Returns:\n        A Deep Lake dataset\n    \"\"\"\n    tfds_numpy = tfds.as_numpy(tfds_ds)  # Convert `tf.data.Dataset` to Python generator\n\n    for sample in tqdm(tfds_numpy):\n        for col in sample:\n            if col not in ds.tensors:\n                ds.create_tensor(col)\n            ds[col].extend(sample[col])\n    return ds\n","repo_name":"activeloopai/deeplake","sub_path":"deeplake/util/from_tfds.py","file_name":"from_tfds.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":7141,"dataset":"github-code","pt":"81"}
{"seq_id":"37072737517","text":"import os\nimport sys\nfrom .DataSplit import data_split\n\n\nclass globalData():\n    data_size = 0\n    use_times = 0\n\n\nclass data_set:\n    def __init__(self, batch_size, epoch):\n        print(\"data_set init\")\n        self.batch_size = batch_size\n        self.epoch = epoch\n        self.data_init()\n        self.train_test_split()\n\n    # Initialization function\n    def data_init(self):\n        path = os.path.join(sys.path[0])\n        print(\"test path:\" + path)\n        # Reflection: load the Processing module and fetch its get_data method\n        moduleName = 'Processing'  # module to import\n        className = \"processor\"  # class to use\n        model = __import__(moduleName, globals=path)  # import the module\n        self.process_class = getattr(model, className)  # look up the attribute in the module\n\n        self.X, self.y = self.process_class().get_data()\n        self.process_train_param()\n        print('DataSet init data')\n\n    def train_test_split(self):\n        d_split = data_split(self.X, self.y)\n        self.X, self.x_test, self.y, self.y_test = d_split.random_split(0.1)\n        self.process_train_param()\n        print('train test split :')\n        print(self.y)\n        print(self.y_test)\n\n    def process_train_param(self):\n        globalData.data_size = len(self.X)\n        self.epoch = globalData.data_size // self.batch_size\n        check_num = globalData.data_size % self.batch_size\n        if check_num > 0:\n            self.epoch += 1\n\n    def fetch_all_data(self):\n        return self.X, self.y\n\n    # Fetch one batch of training data\n    def fetch_next_batch(self):\n        if self.batch_size >= globalData.data_size:\n            return self.data_process(self.X, self.y)\n        else:\n            # Number of samples to fetch\n            fetch_num = self.batch_size * globalData.use_times\n            fetch_end_num = self.batch_size * (globalData.use_times + 1)\n\n            # Slice X and y to get the batch data\n            batch_x = self.X[fetch_num:fetch_end_num]\n            batch_y = self.y[fetch_num:fetch_end_num]\n            globalData.use_times = globalData.use_times + 1\n            return self.data_process(batch_x, batch_y)\n\n    def data_process(self, x, y):\n        x_data = self.process_class().input_x(x)\n        y_data = self.process_class().input_y(y)\n        return x_data, y_data\n\n    def fetch_next_test(self):\n        x_data, y_data = 
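
Aside on the DataSet record above: data_init resolves the processor class by reflection with __import__/getattr. importlib is the usual modern spelling of the same idea; the module and class names below simply mirror the record and are assumptions about its project layout:

```python
import importlib

# Resolve a class by name at runtime, as data_init above does.
def load_class(module_name, class_name):
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

# processor_cls = load_class("Processing", "processor")  # names as in the record
```
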
self.data_process(self.x_test, self.y_test)\n        return x_data, y_data\n\n    # Get the number of batches for the current data\n    def get_step(self):\n        return self.epoch\n","repo_name":"sall84993356/AiHappyFramework","sub_path":"Framework/FeatureProcessing/DataSet.py","file_name":"DataSet.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"972135991","text":"# Strukov Alexandr\r\n\r\nfrom datetime import datetime\r\n\r\ndef ExistenceOfFiles(name, type):\r\n\r\n    try:\r\n        return open('{}.txt'.format(name), 'x+')\r\n    except FileExistsError:\r\n        return open('{}.txt'.format(name), '{}'.format(type))\r\n\r\ndef Number(name):\r\n\r\n    F = open('{}.txt'.format(name), 'r+')\r\n    line = F.readlines()\r\n    F.close()\r\n\r\n    i = 1\r\n    try:\r\n        while not str(line[-i][1]).isalpha():\r\n            try:\r\n                return int(line[-i][1])\r\n            except ValueError:\r\n                i += 1\r\n    except IndexError:\r\n        return 0\r\n\r\ndef AllFilesInOne(name_of_files):\r\n\r\n    Universal = open('AllQuotes.txt', 'w')\r\n\r\n    Universal.write('Hi, guy!' + '\\n' + 'Here are your quotes:' + '\\n')\r\n\r\n    for file in name_of_files:\r\n        Excerpt = open('{}.txt'.format(file), 'r')\r\n\r\n        Universal.write('\\n' + file + '\\n')\r\n        for line in Excerpt.readlines():\r\n            Universal.write('\\t' + str(line[:-1:]) + '\\n')\r\n\r\n        Excerpt.close()\r\n\r\n    Universal.write('\\n' + '(C) Sasha Strukov' + '\\n' + datetime.today().strftime('%d-%m-%Y %H:%M:%S'))\r\n\r\n    Universal.close()\r\n\r\n\r\ndef ShortString(String):\r\n    if len(String) > 100:\r\n        for char in range(100, len(String), 100):\r\n            space = String[:char:].rfind(' ')\r\n            String = String[:space:] + '\\n  ' + String[space::]\r\n\r\n    return String\r\n\r\n\r\nNo = [line[:-1:] for line in open('No.txt', 'r').readlines()]\r\n\r\nprint('Do you want to add some quotes?')\r\nresponse = input()\r\n\r\nwhile response not in No:\r\n\r\n    Library = ExistenceOfFiles('Names_of_books', 'r+')\r\n    library = [line[:-1:] for line in Library]\r\n\r\n    print('Print a book, where the quote is: (Write an author or the name of the book)')\r\n    book = input()\r\n    if str(library).find(book) == -1:\r\n        Library.write(book + '\\n')\r\n    else:\r\n        book = str([word for word in library if book in word])[2:-2:]\r\n\r\n    print('Print the phrases, you want to be written: (Print smth negative to stop)')\r\n    phr = input()\r\n    while phr not in No:\r\n        Quote = ExistenceOfFiles(book, 'a+')\r\n        phr = ShortString(phr)\r\n        count = Number(book) + 1\r\n        Quote.write('#{} {}\\n'.format(count, phr))\r\n        Quote.close()\r\n        phr = input()\r\n\r\n    print('Another book?')\r\n    response = input()\r\n\r\n    Library.close()\r\n\r\nelse:\r\n    print('Do you want to see every quote, you`ve written?')\r\n    if input() not in No:\r\n        Library = ExistenceOfFiles('Names_of_books', 'r+')\r\n        AllFilesInOne([line[:-1:] for line in Library])\r\n        Library.close()\r\n    else:\r\n        print('It`s a pity!\\nBye!')\r\n","repo_name":"MrPod/ExcerptBook","sub_path":"Quote.py","file_name":"Quote.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"32247087342","text":"# Given the root of a binary tree, return its maximum depth.\n\n# A binary tree's maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.\n\n\nclass Solution:\n    def maxDepth_Rec(self, root):\n        if not root:\n            return 0\n        return 1 + max(self.maxDepth_Rec(root.left), self.maxDepth_Rec(root.right))\n\n    def maxDepth_BFS(self, root: 
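
Aside on the quotes record above: ShortString hand-rolls word wrapping at roughly 100 characters with rfind. The standard library's textwrap.fill does the same job:

```python
import textwrap

long_line = ("word " * 50).strip()
# width and subsequent_indent mirror the ~100-char wrap and the two-space
# continuation indent used by ShortString.
wrapped = textwrap.fill(long_line, width=100, subsequent_indent="  ")
assert all(len(line) <= 100 for line in wrapped.splitlines())
```
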
Optional[TreeNode]) -> int:\n # BFS\n if not root:\n return 0\n\n lvl = 0\n q = collections.deque()\n q.append(root)\n\n while q:\n size = len(q)\n\n for i in range(size):\n node = q.popleft()\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n lvl += 1\n\n return lvl\n\n def maxDepth_DFS(self, root: Optional[TreeNode]) -> int:\n # DFS\n stack = []\n stack.append([root, 1])\n depth = 0\n\n while stack:\n node, curr_depth = stack.pop()\n\n if node:\n depth = max(depth, curr_depth)\n stack.append([node.left, curr_depth + 1])\n stack.append([node.right, curr_depth + 1])\n\n return depth\n","repo_name":"prpr4667/leetcode","sub_path":"blind75/104_max_depth_BT.py","file_name":"104_max_depth_BT.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13201357404","text":"import math\nimport pygame\nfrom color import *\n\n\nclass Block:\n \"\"\"\n This class allows us to simply create and manage blocks.\n \"\"\"\n ID = 0\n def __init__(self, mass, x, vx, FPS, APP_HEIGHT, color = colors[\"white\"]):\n self.id = Block.ID\n Block.ID += 1\n\n self.mass = mass\n self.size = self.compute_size()\n\n self.distance_traveled = 0\n self.x = x\n self.y = APP_HEIGHT - self.size\n self.vx = vx\n self.dt = 1 / FPS\n \n self.color = color\n self.rect = pygame.Rect((self.x, self.y), (self.size, self.size))\n self.img = pygame.Surface((self.size, self.size))\n self.img.fill(self.color)\n\n def compute_size(self):\n size = 10 + math.log(self.mass) * 2\n return size\n\n def move(self, time_speed_modifier):\n self.x += self.vx * self.dt * time_speed_modifier\n self.distance_traveled += abs(self.vx * self.dt * time_speed_modifier)\n self.rect = pygame.Rect((self.x, self.y), (self.size, self.size))\n\n def collide(self, block):\n if self.x <= block.x <= self.x + self.size or self.x <= block.x + block.size <= self.x + self.size:\n return True\n return False","repo_name":"remigerme/solid_collision_simulation","sub_path":"prototype/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7629181034","text":"\nfrom utils import *\nfrom Events import Event\n\n\nclass UserAttrib(object):\n\t\"\"\" The idea/plan for this attrib type is:\n\tUse it in the GUI and display it nicely. Store every GUI related info here.\n\tI.e. this should say whether it is read-only to the user (if not visible to user at all ->\n\t don't use this class), if it should be represented as a list, string, etc.\n\t (this is the type, right now all Traits.TraitTypes), some other GUI decoration stuff,\n\t etc.\n\tNote that this lays in the utils module because it is completely decoupled\n\tfrom the GUI. It only stores information which might be useful for a GUI.\n\t\"\"\"\n\n\tstaticCounter = 0\n\n\tclass MetaAttribs:\n\t\tname = None\n\t\ttype = None\n\t\twriteable = False\n\t\tupdateHandler = None\n\t\talignRight = False\n\t\tspaceX = None\n\t\tspaceY = None\n\t\twidth = None\n\t\theight = None\n\t\tvariableWidth = None\n\t\tvariableHeight = False\n\t\tautosizeWidth = False\n\t\thighlight = False\n\t\tlowlight = False\n\t\tcanHaveFocus = False\n\t\twithBorder = False\n\t\tsearchLook = False\n\t\tautoScrolldown = False\n\t\tdragHandler = None\n\t\tselectionChangeHandler = None\n\n\tupdateEventSlot = None\n\n\tdef __init__(self, addUpdateEvent=False, **kwargs):\n\t\t# Keep an index. 
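
Aside: a quick agreement check for the BFS depth variant in the record above, using a minimal TreeNode stand-in for the record's BinaryTrees import:

```python
from collections import deque

class TreeNode:  # minimal stand-in for the record's BinaryTrees import
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def depth_bfs(root):
    if not root:
        return 0
    lvl, q = 0, deque([root])
    while q:
        for _ in range(len(q)):   # drain exactly one level per pass
            node = q.popleft()
            if node.left:
                q.append(node.left)
            if node.right:
                q.append(node.right)
        lvl += 1
    return lvl

root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
assert depth_bfs(root) == 3
```
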
This is so that we know the order of initialization later on.\n\t\t# This is better for the GUI representation so we can order it the same way\n\t\t# as it is defined in the class.\n\t\t# iterUserAttribs() uses this.\n\t\tself.__class__.staticCounter += 1\n\t\tself.index = self.__class__.staticCounter\n\n\t\tfor key in dir(self.MetaAttribs):\n\t\t\tif key.startswith(\"_\"): continue\n\t\t\tsetattr(self, key, getattr(self.MetaAttribs, key))\n\t\tfor key, value in kwargs.items():\n\t\t\tif key.startswith(\"_\"):\n\t\t\t\traise TypeError(\"meta attrib %r invalid\" % key)\n\t\t\tif not hasattr(self.MetaAttribs, key):\n\t\t\t\traise TypeError(\"meta attrib %r unknown\" % key)\n\t\t\tsetattr(self, key, value)\n\t\tself._addUpdateEvent = addUpdateEvent\n\n\tdef getTypeClass(self):\n\t\timport inspect\n\t\tif inspect.isclass(self.type): return self.type\n\t\treturn self.type.__class__\n\tdef isType(self, T):\n\t\treturn issubclass(self.getTypeClass(), T)\n\t@staticmethod\n\tdef _getUserAttribDict(inst):\n\t\tif not hasattr(inst, \"__userAttribs\"):\n\t\t\tsetattr(inst, \"__userAttribs\", {})\n\t\treturn inst.__userAttribs\n\t@classmethod\n\tdef _get(cls, name, inst):\n\t\treturn cls._getUserAttribDict(inst)[name]\n\tdef get(self, inst):\n\t\ttry: return self._get(self.name, inst)\n\t\texcept KeyError: return self.value\n\tdef __get__(self, inst, type=None):\n\t\tif inst is None: # access through class\n\t\t\treturn self\n\t\tif hasattr(self.value, \"__get__\"):\n\t\t\treturn self.value.__get__(inst, type)\n\t\treturn self.get(inst)\n\t@property\n\tdef callDeco(self):\n\t\tclass Wrapper:\n\t\t\tdef __getattr__(_self, item):\n\t\t\t\tf = getattr(self.value, item)\n\t\t\t\tdef wrappedFunc(arg): # a decorator expects a single arg\n\t\t\t\t\tvalue = f(arg)\n\t\t\t\t\treturn self(value)\n\t\t\t\treturn wrappedFunc\n\t\treturn Wrapper()\n\tdef hasUpdateEvent(self):\n\t\treturn self.updateEventSlot\n\tdef updateEvent(self, inst, type=None):\n\t\treturn self.updateEventSlot.__get__(inst, type)\n\t@classmethod\n\tdef _set(cls, name, inst, value):\n\t\tcls._getUserAttribDict(inst)[name] = value\n\tdef set(self, inst, value):\n\t\tself._set(self.name, inst, value)\n\tdef __set__(self, inst, value):\n\t\tif inst is None: # access through class\n\t\t\tself.value = value\n\t\t\treturn\n\t\tif hasattr(self.value, \"__set__\"):\n\t\t\tself.value.__set__(inst, value)\n\t\telse:\n\t\t\tself.set(inst, value)\n\t\tif self.hasUpdateEvent():\n\t\t\t# Do it in a separate thread because we don't expect that some __set__\n\t\t\t# could perform badly or even result in some recursive call.\n\t\t\timport TaskSystem\n\t\t\tTaskSystem.daemonThreadCall(self.updateEvent, args=(inst,), name=\"%r update event callback\" % self)\n\t@classmethod\n\tdef _getName(cls, obj):\n\t\tif hasattr(obj, \"name\"): return obj.name\n\t\telif hasattr(obj, \"func_name\"): return obj.func_name\n\t\telif hasattr(obj, \"fget\"): return cls._getName(obj.fget)\n\t\treturn None\n\tdef __call__(self, attrib):\n\t\tif not self.name:\n\t\t\tself.name = self._getName(attrib)\n\t\tif self._addUpdateEvent:\n\t\t\tself.updateEventSlot = initBy(initFunc=lambda inst: Event(), name=\"%s_updateEvent\" % self.name)\n\t\tself.value = attrib\n\t\treturn self\n\tdef __repr__(self):\n\t\treturn \"\" % (self.name, self.type)\n\ndef iterUserAttribs(obj):\n\tattribs = []\n\tfor attribName in dir(obj.__class__):\n\t\tattrib = getattr(obj.__class__, attribName)\n\t\tif attrib.__class__.__name__ == \"UserAttrib\":\n\t\t\tattribs += [attrib]\n\tattribs.sort(key = lambda attr: 
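
Aside on the UserAttrib record above: it is a full-featured descriptor, a class-level object whose __get__/__set__ mediate instance attribute access with per-instance storage on the side. The minimal shape of that protocol:

```python
class Attrib:
    def __set_name__(self, owner, name):
        self.name = name                      # learn the attribute's name
    def __get__(self, inst, owner=None):
        if inst is None:                      # access through the class
            return self
        return inst.__dict__.get(self.name)   # per-instance storage
    def __set__(self, inst, value):
        inst.__dict__[self.name] = value

class Widget:
    title = Attrib()

w = Widget()
w.title = "hello"
assert w.title == "hello" and isinstance(Widget.title, Attrib)
```
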
attr.index)\n\treturn attribs\n","repo_name":"albertz/music-player","sub_path":"src/UserAttrib.py","file_name":"UserAttrib.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"81"} +{"seq_id":"35131149406","text":"#! /usr/bin/env python3 \n\nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom typing import Callable\n\ndef scalar_field_heatmap(func: Callable, # any function\n x_min: float, # minimal x\n x_max: float, # maximal x\n y_min: float, # minimal y\n y_max: float, # maximal_y\n x_steps: int=100, # counting point \n y_steps: int=100) -> None:\n # start plotting configuration \n figure = plt.figure()\n figure.set_size_inches(12, 12)\n\n # function as a vector\n func_vector = np.vectorize(func)\n # components\n x_component = np.linspace(x_min, x_max, x_steps)\n y_component = np.linspace(y_min, y_max, y_steps)\n x_component, y_component = np.meshgrid(x_component, y_component)\n z_component = func_vector(x_component, y_component)\n\n figure, ax = plt.subplots()\n comp = ax.pcolormesh(x_component, y_component, z_component, cmap='plasma')\n ax.axis([x_component.min(), x_component.max(), y_component.min(), y_component.max()])\n figure.colorbar(comp, ax=ax)\n plt.show()\n","repo_name":"golanghack/ai_only","sub_path":"clean_math/course_1/calculus/module_5/scalar_field_heatmap.py","file_name":"scalar_field_heatmap.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29474162624","text":"import numpy as np\nimport pandas as pd \nimport pickle \nimport torch.nn as nn\nfrom itertools import chain\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom mushroom_rl.environments import Environment, MDPInfo\nfrom mushroom_rl.utils import spaces\n\nfrom RL_for_reco.TorchModel import ModelMaker, FlexibleTorchModel\n\nclass Item_Reco(Environment):\n def __init__(self, items, gamma, horizon, trans_model_abs_path, item_dist=None):\n # MDP parameters\n\n # 1) discrete actions: list of item names or representing integers\n # 2) actions on n-dimensional space: list of a pair of min and max values per action\n self.items = items\n self.action_dim = len(self.items)\n if item_dist is None:\n if len(self.items.shape) == 1:\n if 'none' in self.items:\n self.item_dist = np.zeros(self.action_dim)\n self.item_dist[1:] = 1/(self.action_dim-1)\n else:\n self.item_dist = 1/(self.action_dim)\n else:\n self.item_dist = None\n else:\n self.item_dist = item_dist\n self.gamma = gamma ## discount factor\n self.horizon = horizon ## time limit to long\n self.trans_model = ModelMaker(FlexibleTorchModel, model_dir_path=trans_model_abs_path)\n self.trans_model_params = self.trans_model.model.state_dict()\n tmp = list(self.trans_model_params.keys())\n key = list(filter(lambda x: '0.weight' in x, tmp))[0]\n self.state_dim = self.trans_model_params[key].shape[1] - self.action_dim\n if 'none' in self.items:\n self.state_dim += 1\n\n MM_VAL = 100\n self.min_point = np.ones(self.state_dim) * -MM_VAL\n self.max_point = np.ones(self.state_dim) * MM_VAL\n \n if len(self.items.shape) == 1:\n self._discrete_actions = list(range(self.action_dim))\n else:\n self._discrete_actions = None\n\n # MDP properties\n observation_space = spaces.Box(low=self.min_point, high=self.max_point)\n if len(self.items.shape) == 1:\n action_space = spaces.Discrete(self.action_dim)\n else:\n action_space = spaces.Box(low=self.items[0][0], high=self.items[0][1])\n 
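
Aside on the heatmap record above, with the plotting stripped away: np.vectorize lifts a scalar f(x, y) over the meshgrid, so z comes out with shape (y_steps, x_steps):

```python
import numpy as np

f = np.vectorize(lambda x, y: x * x + y * y)
xs, ys = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(-1, 1, 4))
z = f(xs, ys)                    # evaluated over the full grid
assert z.shape == (4, 5)         # (y_steps, x_steps)
assert z.max() == 2.0            # corners of the square
```
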
mdp_info = MDPInfo(observation_space, action_space, gamma, horizon)\n\n        super().__init__(mdp_info)\n\n    def reset(self, state=None):\n        if state is None:\n            self._state = np.zeros(self.state_dim)\n        else:\n            self._state = np.array(state)\n        return self._state\n\n    def step(self, action):\n        if self._discrete_actions is None:\n            next_state, reward = self.trans_model.infer(np.concatenate([self._state, action]))\n        else:\n            if 'none' in self.items:\n                action_onehot = np.zeros(self.action_dim-1)\n                if action > 0:\n                    action_onehot[action-1] = 1.0\n            else:\n                action_onehot = np.zeros(self.action_dim)\n                action_onehot[action] = 1.0\n            next_state, reward = self.trans_model.infer(np.concatenate([self._state, action_onehot]))\n        \n        return next_state, reward, False, {}\n\n\ndef predict_actions(agent, states, items, none_tree, n_jobs=None, labeled=True):\n    actions = list(map(lambda x: agent.draw_action(x), np.array(states)))\n    actions = np.array(list(chain(*actions)))\n    if labeled:\n        str_actions = np.array(items)[actions] \n        if 'none' in str_actions:\n            none_idx = np.array(range(len(str_actions)))[str_actions == 'none']\n\n            if type(none_tree) == str:\n                try:\n                    none_tree_md = pickle.load(open(none_tree, 'rb'))\n                except:\n                    rec_idx = np.array(range(len(str_actions)))[str_actions != 'none']\n                    none_tree_md = RandomForestClassifier(n_jobs=n_jobs, n_estimators=50, class_weight='balanced', max_features=0.8, max_depth=5, criterion='entropy').fit(np.array(states)[rec_idx], str_actions[rec_idx])\n                    pickle.dump(none_tree_md, open(none_tree, 'wb'), 4)\n            else:\n                none_tree_md = none_tree\n\n            print_df = pd.DataFrame([pd.value_counts(str_actions).to_dict()])\n            none_mapped = none_tree_md.predict(np.array(states)[none_idx])\n            str_actions[none_idx] = none_mapped\n            print_df = pd.concat([print_df, pd.DataFrame([pd.value_counts(str_actions).to_dict()])])\n            print(print_df)\n            return str_actions\n        else:\n            return str_actions\n    else:\n        return actions","repo_name":"gowun/RL_for_reco","sub_path":"RL_for_reco/Item_Reco.py","file_name":"Item_Reco.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"3291760476","text":"import cv2 as cv\n\n# cap = cv.VideoCapture(0)\ncap = cv.VideoCapture('videos/test1.mp4')\n\nif not cap.isOpened():\n    print('Cannot open the camera')\n    exit()\n\nwhile True:\n    # Capture frame-by-frame\n    ret, frame = cap.read()\n    if not ret:\n        print('Error')\n        break\n    # Our operations on the frame go here\n    gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)\n    # Display the resulting frame\n    cv.imshow('frame', gray)\n    if cv.waitKey(1) == ord('q'):\n        break\ncap.release()\ncv.destroyAllWindows()","repo_name":"fengyunchangxuan/fycx-study-python","sub_path":"projects/opencv/video/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"13316010677","text":"from flask import Blueprint\nfrom flask_restful import Api\n\nfrom .views import *\n\nblueprint = Blueprint('/cluster', __name__, static_folder='static')\napi = Api(blueprint)\n\napi.add_resource(ClusterResource, '/')\napi.add_resource(ClusterListResource, '')\n\napi.add_resource(ClusterDeploymentListResource, '//deployment')\n\napi.add_resource(ClusterServiceListResource, '//service')\n\napi.add_resource(ClusterIngressListResource, 
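
Aside: step() in the Item_Reco record above encodes a discrete action as a one-hot vector and reserves the all-zeros vector for the 'none' action when one is present. Extracted as a standalone sketch; encode_action is an illustrative name:

```python
import numpy as np

def encode_action(action, action_dim, has_none):
    if has_none:
        onehot = np.zeros(action_dim - 1)   # 'none' (action 0) stays all zeros
        if action > 0:
            onehot[action - 1] = 1.0
        return onehot
    onehot = np.zeros(action_dim)
    onehot[action] = 1.0
    return onehot

assert encode_action(0, 4, True).tolist() == [0.0, 0.0, 0.0]   # 'none'
assert encode_action(2, 4, True).tolist() == [0.0, 1.0, 0.0]
```
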
'//ingress')\n\n","repo_name":"megamcloud/arrplat","sub_path":"plugins/arrplat-k8s/controller/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4123185755","text":"# The thief has found himself a new place for his thievery again. There is only one entrance to this area,\n# called the \"root.\" Besides the root, each house has one and only one parent house. After a tour, the smart thief\n# realized that \"all houses in this place forms a binary tree\". It will automatically contact the police if two\n# directly-linked houses were broken into on the same night.\n#\n# Determine the maximum amount of money the thief can rob tonight without alerting the police.\n#\n# Example 1:\n#\n# Input: [3,2,3,null,3,null,1]\n#\n# 3\n# / \\\n# 2 3\n# \\ \\\n# 3 1\n#\n# Output: 7\n# Explanation: Maximum amount of money the thief can rob = 3 + 3 + 1 = 7.\n#\n# Example 2:\n#\n# Input: [3,4,5,1,3,null,1]\n#\n# 3\n# / \\\n# 4 5\n# / \\ \\\n# 1 3 1\n#\n# Output: 9\n# Explanation: Maximum amount of money the thief can rob = 4 + 5 = 9.\nfrom collections import deque\n\nfrom BinaryTrees import TreeNode, print_preorder\n\n\n# incorrect\n# def rob(root):\n# def dfs(node, lvlsw):\n# nonlocal maxval1, maxval2\n# if lvlsw:\n# maxval1 += node.val\n# else:\n# maxval2 += node.val\n# if node.left:\n# dfs(node.left, not lvlsw)\n# if node.right:\n# dfs(node.right, not lvlsw)\n#\n# if root is None:\n# return 0\n# maxval1 = maxval2 = 0\n# dfs(root, True)\n# return max(maxval1, maxval2)\n\ndef rob(root):\n if root is None:\n return 0\n\n q = deque([root])\n lvl_sums = []\n while q: # level order traversal\n lvl_sum = 0\n for _ in range(len(q)):\n node = q.popleft()\n lvl_sum += node.val # accumulate sum per level\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n lvl_sums.append(lvl_sum)\n # dp, get max sum with no adjacent elements of lvl_sums\n if len(lvl_sums) == 1:\n return lvl_sums[0]\n n = len(lvl_sums)\n dp = [0] * n\n dp[0] = lvl_sums[0]\n dp[1] = max(lvl_sums[0], lvl_sums[1])\n for i in range(2, n):\n dp[i] = max(dp[i - 1], dp[i - 2] + lvl_sums[i])\n return dp[-1]\n\n\n# arr = [3, 2, 3, None, 3, None, 1]\n# arr = [3, 4, 5, 1, 3, None, 1]\n# arr = [4, 1, None, 2, None, None, None, 3]\n# arr = [1]\narr = [3, 1, None, 2, None, None, None, None, 4]\nnodes = [TreeNode(v) if v else None for v in arr]\nroot = nodes[0]\nfor i, node in enumerate(nodes):\n left_idx = i * 2 + 1\n if left_idx < len(arr) and arr[i] is not None:\n node.left = nodes[left_idx]\n right_idx = i * 2 + 2\n if right_idx < len(arr) and arr[i] is not None:\n node.right = nodes[right_idx]\n\nprint_preorder(root)\nprint(rob(root))\n","repo_name":"vpc20/python-dynamic-programming","sub_path":"HouseRobberIII.py","file_name":"HouseRobberIII.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42557652983","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport warnings\n\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.preprocessing import StandardScaler, RobustScaler\n\nwarnings.filterwarnings(\"ignore\")\n\n\n\n\n\n\n\n# 
Whole-dataset processing \n\ndef drop_raw_df_na(df):\n    df_tmp = df.copy()\n    df_tmp = df_tmp.drop(df_tmp[df_tmp[\"senior\"].isna()].index).reset_index(drop = True)\n    \n    return df_tmp\n\n\n# Convert to price per unit area\ndef get_perprice(df):\n    df_tmp = df.copy()\n    df_tmp[\"perprice\"] = df_tmp[\"price\"]/df_tmp[\"area\"]\n    df_tmp = df_tmp.drop(\"price\", axis = 1)\n    \n    return df_tmp\n\ndef impute_na(df):\n    df_tmp = df.copy()\n    \n    imp_mean = IterativeImputer(random_state= 42)\n    tmp_col = df_tmp.select_dtypes(exclude = [\"object\", \"category\"]).columns.to_list()\n    df_tmp[tmp_col] = imp_mean.fit_transform(df_tmp[tmp_col])\n    \n    return df_tmp\n\n\n# Remove the top k% of rows by price\ndef drop_top_price(df, k):\n    tmp_df = df.copy()\n    top_price = tmp_df.groupby(\"gu\")[\"price\"].quantile(k).to_frame().T\n    top_price_col = top_price.columns.to_list()\n\n\n    for col in top_price_col: \n        gu_idx = tmp_df[tmp_df[\"gu\"] == col][\"price\"].index\n#         tmp_df.loc[gu_idx, \"price\"] = tmp_df.loc[gu_idx, \"price\"].apply(lambda x: top_price.loc[:, col][0] if x >= top_price.loc[:, col][0] else x)\n        tmp_df.loc[gu_idx, \"price\"] = tmp_df.loc[gu_idx, \"price\"].apply(lambda x: np.nan if x >= top_price.loc[:, col][0] else x)\n    tmp_df = tmp_df.dropna()\n    tmp_df = tmp_df.reset_index(drop = True)\n    \n    return tmp_df\n\ndef get_pop_rate(df):\n    tmp_df = df.copy()\n    tmp_df['rate_male'] = tmp_df['male_kor'] / tmp_df['pop']\n    tmp_df['rate_female'] = tmp_df['female_kor'] / tmp_df['pop']\n    tmp_df['rate_male_f'] = tmp_df['male_for'] / tmp_df['pop']\n    tmp_df['rate_female_f'] = tmp_df['female_for'] / tmp_df['pop']\n    tmp_df['rate_senior'] = tmp_df['senior'] / tmp_df['pop']\n    \n    return tmp_df\n\n\n# -----------------------------------------------------------------------------------------------\n# Road-name suffixes: -gil, -ro, -daero\ndef get_road(x) :\n    con = x.split(' ')[0]\n    if con[-2:] == '대로' :\n        return 1\n    elif con[-1:] == '로' :\n        return 2\n    else :\n        return 3\n\n# encoding ----------------------------------------------------------------------------------------------- \n    \ndef categorize(df):\n    df_tmp = df.copy()\n    \n    df_tmp[\"gu\"] = df_tmp[\"gu\"].astype(\"category\")\n    df_tmp[\"doro_trans\"] = df_tmp[\"doro_trans\"].astype(\"category\")\n#     df_tmp[\"interest\"] = df_tmp[\"interest\"].astype(\"category\")\n    df_tmp[\"floor_level\"] = df_tmp[\"floor_level\"].astype(\"category\")\n    df_tmp[\"tradetype\"] = df_tmp[\"tradetype\"].astype(\"category\")\n    df_tmp[\"dong\"] = df_tmp[\"dong\"].astype(\"category\")\n    \n    return df_tmp\n\n\n# scaling -----------------------------------------------------------------------------------------------\n\ndef get_log_scaled(df):\n    \n    df_tmp = df.copy()\n    df_tmp = np.log1p(df_tmp)\n    \n    return df_tmp\n\n\n# F.E.-----------------------------------------------------------------------------------------------\n\ndef get_year_trans(x):\n    year_trans = int(x.split(\"-\")[0])\n    \n    return year_trans\n\ndef get_year_gap(df):\n    df_tmp = df.copy()\n    df_tmp[\"year_gap\"] = df_tmp[\"year_trans\"] - df_tmp[\"built\"]\n    \n    return df_tmp\n\n\ndef get_floor_level(x):\n    if x < 0:\n        return \"under_0\"\n    elif x <= 5:\n        return \"btw_1_5\"\n    elif x <= 20:\n        return \"btw_6_20\"\n    else:\n        return \"over_20\"\n    \n\n# Split the initial data into train and validation sets; \n# Note: apart from encoding, distribution checks and scaling should be done after the split \ndef split_test_train(df, preprocess = bool):\n    \n    \n    global num_col, log_col, scale_col, cat_col\n    \n    df_tmp = df.copy()\n    df_tmp = drop_raw_df_na(df_tmp)\n    \n    if preprocess == True:\n        df_tmp = impute_na(df_tmp)\n#         print(\"NA: \",df_tmp.isna().sum().sum())\n#         print(\"oil drop\", df_tmp.shape)\n        df_tmp = drop_top_price(df_tmp, 
0.9)\n# print(\"top price drop\", df_tmp.shape)\n df_tmp = get_perprice(df_tmp)\n# print(\"perprice\", df_tmp.shape)\n# le = LabelEncoder()\n# df_tmp[\"dong_label\"] = le.fit_transform(df_tmp[\"dong\"])\n \n df_tmp[\"doro_trans\"] = df_tmp[\"doro\"].apply(lambda x: get_road(x))\n df_tmp[\"year_trans\"] = df_tmp[\"date\"].apply(lambda x: get_year_trans(x))\n df_tmp = get_year_gap(df_tmp)\n\n \n df_tmp[\"floor_level\"] = df_tmp[\"floor\"].apply(lambda x: get_floor_level(x))\n df_tmp[\"floor\"] = df_tmp[\"floor\"] + 3\n \n df_tmp = get_pop_rate(df_tmp)\n# print(df_tmp.shape)\n \n \n df_tmp = categorize(df_tmp)\n\n \n# print(df_tmp.shape)\n\n test = df_tmp[df_tmp[\"date\"].str.contains(\"2021\")].copy()\n# print(df.shape)\n# print(test.shape)\n data = df_tmp.drop(test.index, axis = 0)\n data = data.reset_index(drop = True)\n test = test.reset_index(drop = True)\n \n target = data[\"perprice\"]\n y_test_true = test[\"perprice\"]\n \n data = data.drop(\"perprice\", axis = 1)\n test = test.drop(\"perprice\", axis = 1)\n \n# target = data[\"price\"]\n# y_test_true = test[\"price\"]\n \n# data = data.drop(\"price\", axis = 1)\n# test = test.drop(\"price\", axis = 1)\n final_col = [\n# 'date', \n 'gu', \n 'dong', \n 'area',\n 'floor',\n# 'built',\n# 'doro',\n 'tradetype', \n 'interest',\n# 'growth',\n 'unemployment',\n 'inflation',\n 'stock',\n# 'house_debit',\n 'ex_dollar',\n# 'ex_yen',\n# 'household',\n 'pop',\n# 'male_kor',\n# 'female_kor',\n# 'male_for',\n# 'female_for',\n# 'rate_male',\n 'rate_female',\n 'rate_male_f', \n 'rate_female_f',\n 'rate_senior',\n 'perhold',\n# 'senior',\n# 'dong_label',\n 'doro_trans',\n# 'year_trans',\n 'year_gap',\n 'floor_level',\n 'oil_price'\n \n ]\n\n data = data[final_col]\n test = test[final_col]\n num_col = data.select_dtypes(exclude = [\"object\",\"category\"]).columns.to_list()\n log_col = [\"area\", \"perhold\", \"male_for\", \"female_for\", \"floor\"] \n cat_col = data.select_dtypes(include = [\"object\", \"category\"]).columns.to_list()\n \n return data, target, test, y_test_true\n\n \n \n\n \n# df_tmp = data.drop([\"perprice\",\"built\", \"doro\",\"growth\",\"ex_yen\",\"make_kor\", \"female_kor\", \"male_for\",\"female_for\", \"dong_label\", \"year_trans\"], axis = 1)\n \n# num_col = data.select_dtypes(exclude = [\"object\",\"category\"]).columns.to_list()\n# log_col = [\"area\", \"perhold\", \"male_for\", \"female_for\", \"floor\"] \n# cat_col = data.select_dtypes(include = [\"object\", \"category\"]).columns.to_list()\n# ---------------------------------------------------------------------------------------------\n \ndef scale_data(X_train, X_valid, X_test):\n tmp_X_train = X_train.copy()\n tmp_X_valid = X_valid.copy()\n tmp_X_test = X_test.copy()\n \n tmp_X_train = tmp_X_train.reset_index(drop = True)\n tmp_X_valid = tmp_X_valid.reset_index(drop = True)\n tmp_X_test = tmp_X_test.reset_index(drop = True)\n \n \n # scale\n sd_scaler = StandardScaler()\n# rb_scaler = RobustScaler()\n \n tmp_X_train[num_col] = sd_scaler.fit_transform(tmp_X_train[num_col])\n tmp_X_valid[num_col] = sd_scaler.transform(tmp_X_valid[num_col])\n tmp_X_test[num_col] = sd_scaler.transform(tmp_X_test[num_col])\n \n return tmp_X_train[final_col], tmp_X_valid[final_col], tmp_X_test[final_col]\n\n \nif __name__ == '__main__':\n split_test_train(df)\n \n \n \n\n 
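
Aside: scale_data in the preprocessing record above follows the standard leakage-avoiding recipe, fitting the scaler on the training split only and reusing the fitted statistics for validation and test:

```python
import numpy as np
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X_train = rng.normal(5, 2, (100, 3))
X_test = rng.normal(5, 2, (20, 3))

scaler = StandardScaler()
X_train_s = scaler.fit_transform(X_train)   # statistics come from train only
X_test_s = scaler.transform(X_test)         # reuse the train mean/std
assert abs(X_train_s.mean()) < 1e-9
```
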
\n","repo_name":"nealyoun/machine_learning","sub_path":"Projects/real_estate/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":8301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34122883522","text":"import concurrent.futures\r\nimport pickle\r\nimport re\r\nimport tempfile\r\nimport time\r\nimport xml.etree.ElementTree as ET\r\nfrom datetime import datetime, timedelta\r\nfrom urllib.request import urlopen as uReq\r\nimport matplotlib.pyplot as plt\r\nimport requests\r\nfrom pandas.plotting import table\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\nfrom webScraping import *\r\nfrom IndicatorObject import IndicatorObject\r\n\r\nimport yfinance as yf\r\n\"\"\"\r\n USEFUL FUNCTIONS\r\n\"\"\"\r\n\r\n\r\ndef pickleObjectList(objectList, fileName):\r\n with open(fileName, \"wb\") as f:\r\n pickle.dump(len(objectList), f)\r\n for value in objectList:\r\n pickle.dump(value, f)\r\n\r\n\r\ndef getBreadth(stock_object_list):\r\n advances, declines, volumeAdvances, volumeDeclines = 0, 0, 0, 0\r\n for stock in stock_object_list:\r\n if stock.dailyPctChange > 0:\r\n advances += 1\r\n volumeAdvances += stock.volume\r\n continue\r\n declines += 1\r\n volumeDeclines += stock.volume\r\n return [int(advances), int(declines), int(volumeAdvances), int(volumeDeclines)]\r\n\r\n\r\n\"\"\" \r\n\r\n INTRADAY UPDATES \r\n\r\n\"\"\"\r\n\r\n\r\ndef highFrequencyUpdateEuropeanStocks(publisher):\r\n if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n return\r\n\r\n if not \"09:15:00\" < datetime.strftime(datetime.now(), \"%X\") < \"18:15:00\": # Open times including GB/France/Germany\r\n return\r\n\r\n stockIndicatorObjects = []\r\n message = \"\"\r\n with open(pickledDataDirectoryPath + \"/CAC40.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with open(pickledDataDirectoryPath + \"/DAX.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with open(pickledDataDirectoryPath + \"/FTSE.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with concurrent.futures.ProcessPoolExecutor() as executor:\r\n for indicator, response in zip(stockIndicatorObjects,\r\n executor.map(IndicatorObject.highFrequencyCheck, stockIndicatorObjects)):\r\n try:\r\n increaseType = \"⬆\" if response[1] > 0 else \"🔻\"\r\n message += \" \".join([response[0], \":\", str(response[1]), \"%\", increaseType, '\\n'])\r\n except:\r\n pass\r\n if message:\r\n try:\r\n publisher.publish_tweet(\"Euro market in the last 15 minutes:\" + '\\n' + message)\r\n except:\r\n pass\r\n\r\ndef highFrequencyUpdateAmericanStocks(publisher):\r\n if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n return\r\n if not \"15:45:00\" <= datetime.strftime(datetime.now(), \"%X\") < \"22:15:00\": # Open times for US markets\r\n return\r\n stockIndicatorObjects = []\r\n message = \"\"\r\n with open(pickledDataDirectoryPath + \"/SP500.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with concurrent.futures.ProcessPoolExecutor() as executor:\r\n for indicator, response in zip(stockIndicatorObjects,\r\n executor.map(IndicatorObject.highFrequencyCheck, stockIndicatorObjects)):\r\n try:\r\n increaseType = \"⬆\" if response[1] > 0 else \"🔻\"\r\n message += \" \".join([response[0], \":\", 
str(response[1]), \"%\", increaseType, '\\n'])\r\n except:\r\n pass\r\n if message:\r\n publisher.publish_tweet(\"US market in the last 15 minutes:\" + '\\n' + message)\r\n\r\n\"\"\" \r\n\r\n DAILY UPDATES \r\n\r\n\"\"\"\r\n\r\n\r\ndef dailyUpdateEuropean(publisher):\r\n if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n return\r\n IndicesIndicatorObjects = []\r\n with open(pickledDataDirectoryPath + \"/Indices.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n IndicesIndicatorObjects.append(pickle.load(f))\r\n message = \"\"\r\n for i in range(3, 7):\r\n IndicesIndicatorObjects[i].checkForChange()\r\n message += IndicesIndicatorObjects[i].textToPublishDaily + '\\n'\r\n publisher.publish_tweet(message)\r\n\r\n\r\ndef dailyUpdateCommodities(publisher):\r\n if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n return\r\n IndicesIndicatorObjects = []\r\n with open(pickledDataDirectoryPath + \"/Indices.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n IndicesIndicatorObjects.append(pickle.load(f))\r\n message = \"\"\r\n for i in range(7, len(Indices)):\r\n IndicesIndicatorObjects[i].checkForChange()\r\n message += IndicesIndicatorObjects[i].textToPublishDaily + '\\n'\r\n publisher.publish_tweet(message)\r\n\r\n\r\ndef dailyUpdateAmerican(publisher):\r\n if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n return\r\n # if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n # return\r\n IndicesIndicatorObjects = []\r\n with open(pickledDataDirectoryPath + \"/Indices.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n IndicesIndicatorObjects.append(pickle.load(f))\r\n message = \"\"\r\n for i in range(0, 3):\r\n IndicesIndicatorObjects[i].checkForChange()\r\n message += IndicesIndicatorObjects[i].textToPublishDaily + '\\n'\r\n publisher.publish_tweet(message)\r\n\r\n\r\ndef endOfDayEuropeanStockMarket(publisher):\r\n if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n return\r\n stockIndicatorObjects = []\r\n top10spreads = pd.DataFrame(columns=['highLowSpread'])\r\n\r\n with open(pickledDataDirectoryPath + \"/CAC40.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with open(pickledDataDirectoryPath + \"/DAX.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with open(pickledDataDirectoryPath + \"/FTSE.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with concurrent.futures.ProcessPoolExecutor() as executor: # Rafraichit les données de la base et récupère les spreads high low\r\n for indicator, response in zip(stockIndicatorObjects,\r\n executor.map(IndicatorObject.getStats, stockIndicatorObjects)):\r\n top10spreads.loc[indicator] = indicator.highLowSpread\r\n\r\n pickleObjectList(stockIndicatorObjects[:40], pickledDataDirectoryPath + \"/CAC40.dat\")\r\n pickleObjectList(stockIndicatorObjects[40:70], pickledDataDirectoryPath + \"/DAX.dat\")\r\n pickleObjectList(stockIndicatorObjects[70:], pickledDataDirectoryPath + \"/FTSE.dat\")\r\n\r\n breadthDf = pd.DataFrame(columns=['Advances ⬆ï¸�' , 'Declines ⬇ï¸�','Volume ⬆ï¸� ', 'Volume ⬇ï¸�'])\r\n breadthDf.loc['CAC40'] = getBreadth(stockIndicatorObjects[:40])\r\n breadthDf.loc['FTSE'] = getBreadth(stockIndicatorObjects[70:])\r\n breadthDf.loc['DAX'] = getBreadth(stockIndicatorObjects[40:70])\r\n\r\n\r\n 
top10spreads = top10spreads.sort_values('highLowSpread', ascending=False).head(10).index\r\n    top10spreadsData = pd.DataFrame()\r\n    fig, axs = plt.subplots(2, 5)\r\n    for i in range(10):\r\n        top10spreadsData[top10spreads[i].tag] = top10spreads[i].EndOfDayStats()\r\n        time.sleep(0.01)\r\n    for i in range(10):\r\n        axs[i // 5, i % 5].boxplot(top10spreadsData[top10spreads[i].tag])\r\n        axs[i // 5, i % 5].set_title(top10spreads[i].tag)\r\n        axs[i // 5, i % 5].axes.get_xaxis().set_ticks([])\r\n        axs[i // 5, i % 5].tick_params(axis='y', labelsize=7)\r\n        time.sleep(0.01)\r\n\r\n    fig.subplots_adjust(hspace=0.5, wspace=1.5)\r\n    plt.savefig(repositoryDirectory + '/EODeuroMarket' + str(datetime.today().date()) + '.png', dpi=199)\r\n\r\n    plt.clf()\r\n\r\n    fig, axs = plt.subplots(2, 1)  # no visible frame\r\n    axs[0].patch.set_visible(False)\r\n    axs[1].patch.set_visible(False)\r\n\r\n    axs[0].axis('off')\r\n    axs[1].axis('off')\r\n\r\n    table(ax=axs[0], data=breadthDf)\r\n    plt.savefig(repositoryDirectory + \"/EUbreadth\" + str(datetime.today().date()) + '.png')\r\n\r\n    publisher.tweet_image(\"EUROPE: Top 10 movements in blue-chip stock market / Advance - Decline\",\r\n                          [repositoryDirectory + '/EODeuroMarket' + str(datetime.today().date()) + '.png',\r\n                           repositoryDirectory + \"/EUbreadth\" + str(datetime.today().date()) + '.png']\r\n                          )\r\n\r\n\r\ndef endOfDayUsStockMarket(publisher):\r\n    if not datetime.today().weekday() <= 4:  # If it's not a weekday, no update\r\n        return\r\n\r\n    stockIndicatorObjects = []\r\n    top10spreads = pd.DataFrame(columns=['highLowSpread'])\r\n\r\n    with open(pickledDataDirectoryPath + \"/SP500.dat\", \"rb\") as f:\r\n        for _ in range(pickle.load(f)):\r\n            stockIndicatorObjects.append(pickle.load(f))\r\n\r\n    with concurrent.futures.ProcessPoolExecutor() as executor:  # Refresh the stored data and collect the high-low spreads\r\n        for indicator, response in zip(stockIndicatorObjects,\r\n                                       executor.map(IndicatorObject.getStats, stockIndicatorObjects)):\r\n            top10spreads.loc[indicator] = indicator.highLowSpread\r\n\r\n    with concurrent.futures.ProcessPoolExecutor() as executor:  # Check the stocks for notable changes\r\n        for indicator, response in zip(stockIndicatorObjects,\r\n                                       executor.map(IndicatorObject.checkForChange, stockIndicatorObjects)):\r\n            top10spreads.loc[indicator] = indicator.highLowSpread\r\n\r\n    pickleObjectList(stockIndicatorObjects, pickledDataDirectoryPath + \"/SP500.dat\")\r\n\r\n    breadthDf = pd.DataFrame(columns=['Advances ⬆️', 'Declines ⬇️', 'Volume ⬆️', 'Volume ⬇️'])\r\n    breadthDf.loc['SP500'] = getBreadth(stockIndicatorObjects)\r\n\r\n    top10spreads = top10spreads.sort_values('highLowSpread', ascending=False).head(10).index\r\n    top10spreadsData = pd.DataFrame()\r\n    fig, axs = plt.subplots(2, 5)\r\n    for i in range(10):\r\n        top10spreadsData[top10spreads[i].tag] = top10spreads[i].EndOfDayStats()\r\n        time.sleep(0.01)\r\n    for i in range(10):\r\n        axs[i // 5, i % 5].boxplot(top10spreadsData[top10spreads[i].tag])\r\n        axs[i // 5, i % 5].set_title(top10spreads[i].tag)\r\n        axs[i // 5, i % 5].axes.get_xaxis().set_ticks([])\r\n        axs[i // 5, i % 5].tick_params(axis='y', labelsize=7)\r\n        time.sleep(0.01)\r\n\r\n    fig.subplots_adjust(hspace=0.5, wspace=1.5)\r\n\r\n    plt.savefig(repositoryDirectory + '/EODUSMarket' + str(datetime.today().date()) + '.png', dpi=199)\r\n\r\n    plt.clf()\r\n\r\n    fig, axs = plt.subplots(2, 1)  # no visible frame\r\n    axs[0].patch.set_visible(False)\r\n    axs[1].patch.set_visible(False)\r\n\r\n    axs[0].axis('off')\r\n    
axs[1].axis('off')\r\n\r\n table(ax=axs[0], data=breadthDf)\r\n plt.savefig(repositoryDirectory + \"/USbreadth\" + str(datetime.today().date()) + '.png')\r\n\r\n publisher.tweet_image(\"US : Top 10 movements in blue-chip stock market / Advance - Decline\",\r\n [repositoryDirectory + '/EODUSMarket' + str(datetime.today().date()) + '.png',\r\n repositoryDirectory + \"/USbreadth\" + str(datetime.today().date()) + '.png']\r\n )\r\n\r\n\r\ndef checkForChangesEUR(publisher):\r\n if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n return\r\n stockIndicatorObjects = []\r\n top10spreads = pd.DataFrame(columns=['highLowSpread'])\r\n message=\"\"\r\n with open(pickledDataDirectoryPath + \"/CAC40.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with open(pickledDataDirectoryPath + \"/DAX.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with open(pickledDataDirectoryPath + \"/FTSE.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with concurrent.futures.ProcessPoolExecutor() as executor:\r\n for indicator, response in zip(stockIndicatorObjects,\r\n executor.map(IndicatorObject.checkForChange, stockIndicatorObjects)):\r\n if response:\r\n publisher.publish_tweet(response)\r\n\r\n\r\ndef checkForChangesUS(publisher):\r\n if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n return\r\n stockIndicatorObjects = []\r\n message = \"\"\r\n\r\n with open(pickledDataDirectoryPath + \"/SP500.dat\", \"rb\") as f:\r\n for _ in range(pickle.load(f)):\r\n stockIndicatorObjects.append(pickle.load(f))\r\n\r\n with concurrent.futures.ProcessPoolExecutor() as executor:\r\n for indicator, response in zip(stockIndicatorObjects,\r\n executor.map(IndicatorObject.checkForChange, stockIndicatorObjects)):\r\n if response:\r\n publisher.publish_tweet(response)\r\n\r\n\r\ndef updateEuropeanRateFile():\r\n if not datetime.today().weekday() <= 5: # If it's not a weekday, no update\r\n return\r\n link = 'https://sdw-wsrest.ecb.europa.eu/service/data/YC/B.U2.EUR.4F.G_N_A+G_N_C.SV_C_YM.?lastNObservations=1'\r\n byteData = requests.get(link).content.splitlines(keepends=True)\r\n byteData = [b''] + byteData[12:-1]\r\n byteData = b''.join(byteData)\r\n byteData = re.sub(b'generic:', b'', re.sub(b'message:', b'', byteData))\r\n f = tempfile.TemporaryFile()\r\n f.write(byteData)\r\n f.seek(0)\r\n tree = ET.parse(f)\r\n Spot_Rates = pd.DataFrame(columns=Maturities_Spot_Rate)\r\n Instant_Forward_Rates = pd.DataFrame(columns=Maturities_Instant_Forward)\r\n\r\n for period in Maturities_Spot_Rate:\r\n for element in (tree.findall(\"./Series/SeriesKey/Value[@value='\" + period + \"']/../../Obs\")):\r\n date = element.find('./ObsDimension').attrib['value']\r\n value = float(element.find('./ObsValue').attrib['value'])\r\n Spot_Rates.at[date, period] = value\r\n\r\n for period in Maturities_Instant_Forward:\r\n for element in (tree.findall(\"./Series/SeriesKey/Value[@value='\" + period + \"']/../../Obs\")):\r\n date = element.find('./ObsDimension').attrib['value']\r\n value = float(element.find('./ObsValue').attrib['value'])\r\n Instant_Forward_Rates.at[date, period] = value\r\n\r\n if not datetime.today().weekday() <= 4: # If it's not a weekday, no update\r\n Spot_Rates.to_csv(rateFileDirectory + '/Spot_Rates.csv', sep='\\t', mode='a', header=None)\r\n Instant_Forward_Rates.to_csv(rateFileDirectory + 
'/Instant_Forward_Rates.csv', sep='\\t', mode='a', header=None)\r\n\r\n    return Spot_Rates.T, Instant_Forward_Rates.T  # Transposed so that they can be plotted\r\n\r\n\r\ndef updateUSRateFile():\r\n    if not datetime.today().weekday() <= 5:  # Skip Sundays, no update\r\n        return\r\n    link = 'https://www.treasury.gov/resource-center/data-chart-center/interest-rates/Pages/TextView.aspx?data=yieldYear&year=2020'\r\n\r\n    def floatify(string):\r\n        try:\r\n            return float(string)\r\n        except:\r\n            return 'N/A'\r\n\r\n    uClient = uReq(link)\r\n    page = uClient.read()\r\n    import codecs\r\n    page_soup_html = soup(codecs.decode(page, 'utf-8'), \"html.parser\")\r\n    data = page_soup_html.find(\"table\", {\"class\": [\"t-chart\"]}).findAll('tr')\r\n    treasury_yields = pd.DataFrame(columns=[string.text for string in data[0].findAll('th')[1:]])\r\n\r\n    for element in data:\r\n        row = element.findAll('td')\r\n        if row:\r\n            treasury_yields.loc[datetime.strptime(row[0].text, \"%m/%d/%y\")] = [floatify(element.text) for element in\r\n                                                                               row[1:]]\r\n    if not datetime.today().weekday() <= 4:  # Append to the CSV file on weekends only\r\n        treasury_yields.to_csv(rateFileDirectory + '/treasuryTest.csv', sep='\\t', mode='a', header=None)\r\n    return treasury_yields.T[-1]\r\n\r\n\r\ndef rateUpdate():  # Only use is to schedule them both at the same time\r\n    if not datetime.today().weekday() <= 4:  # If it's not a weekday, no update\r\n        return\r\n    updateEuropeanRateFile()\r\n    updateUSRateFile()\r\n\r\n\r\n\"\"\"\r\n\r\n    WEEKLY UPDATES\r\n\r\n\"\"\"\r\n\r\n\r\ndef YieldCurve(publisher):\r\n    US_X_axis = [0.08333333, 0.16666667, 0.25, 0.5, 1.,\r\n                 2., 3., 5., 7., 10.,\r\n                 20., 30.]\r\n    EU_X_axis = [0.25, 0.5, 0.75, 1., 2., 3., 4., 5., 6.,\r\n                 7., 8., 9., 10., 11., 12., 13., 14., 15.,\r\n                 16., 17., 18., 19., 20., 21., 22., 23., 24.,\r\n                 25., 26., 27., 28., 29., 30.]\r\n    Spot_Rates, Instant_Forward_Rates = updateEuropeanRateFile()  # Already transposed here\r\n    US_spot_rate = updateUSRateFile()\r\n    Spot_Rates['X_axis'] = EU_X_axis\r\n    Instant_Forward_Rates['X_axis'] = EU_X_axis\r\n    US_spot_rate['X_axis'] = US_X_axis\r\n    plt.plot(Spot_Rates['X_axis'], Spot_Rates.iloc[:, 0], label='Spot rate Euro area', marker='+')\r\n    plt.plot(Instant_Forward_Rates['X_axis'], Instant_Forward_Rates.iloc[:, 0], label='Instant Forward Euro area',\r\n             marker='+')\r\n    plt.plot(US_spot_rate['X_axis'], US_spot_rate.iloc[:, 0], label='US Spot treasury yield', marker='+')\r\n    plt.xlabel('Years')\r\n    plt.ylabel('Rate(%)')\r\n    plt.legend(loc=4, prop={'size': 7})\r\n    plt.savefig(repositoryDirectory + '/Yield_' + Spot_Rates.keys()[0] + '.png', dpi=199)\r\n    publisher.tweet_image(\"Yield curve : \" + str((datetime.today() - timedelta(days=1)).strftime('%d %b %Y')),\r\n                          repositoryDirectory + '/Yield_' + Spot_Rates.keys()[0] + '.png')\r\n    return repositoryDirectory + '/Yield_' + Spot_Rates.keys()[0] + '.png'\r\n\r\n\r\ndef InflationCurve(publisher):\r\n    inflationIndicatorObjects = []\r\n    with open(pickledDataDirectoryPath + \"/Inflation.dat\", \"rb\") as f:\r\n        for _ in range(pickle.load(f)):\r\n            inflationIndicatorObjects.append(pickle.load(f))\r\n\r\n    plt.ylabel('Consumer Price Index (%)')\r\n    for element in inflationIndicatorObjects:\r\n        element.refresh()\r\n        element.Monthly['Taux'].plot(label=element.tag.replace('Inflation', ''), linewidth=0.4, marker='+')\r\n        time.sleep(0.001)\r\n    pickleObjectList(inflationIndicatorObjects, pickledDataDirectoryPath + \"/Inflation.dat\")\r\n    plt.legend(loc=0, prop={'size': 6})\r\n    plt.savefig(repositoryDirectory + \"/Inflation_\" + 
str(datetime.today().date()) + \".png\", dpi=199)\r\n publisher.tweet_image(\"〽Inflation rates (basis = Consumer Price Index)〽\",\r\n repositoryDirectory + \"/Inflation_\" + str(datetime.today().date()) + \".png\")\r\n\r\n\r\ndef compareYieldCurves(publisher):\r\n dataSpot = pd.read_csv(rateFileDirectory + '/Spot_Rates.csv', sep='\\t')\r\n dataForward = pd.read_csv(rateFileDirectory + '/Instant_Forward_Rates.csv', sep='\\t')\r\n EU_X_axis = [0.25, 0.5, 0.75, 1., 2., 3., 4., 5., 6.,\r\n 7., 8., 9., 10., 11., 12., 13., 14., 15.,\r\n 16., 17., 18., 19., 20., 21., 22., 23., 24.,\r\n 25., 26., 27., 28., 29., 30.]\r\n plt.plot(EU_X_axis, dataSpot.iloc[-1], label='EU spot Today', marker='+')\r\n plt.plot(EU_X_axis, dataSpot.iloc[-260], label='EU spot 1 year ago', marker='+')\r\n plt.plot(EU_X_axis, dataSpot.iloc[-1300], label='EU spot 5 years ago', marker='+')\r\n plt.plot(EU_X_axis, dataForward.iloc[-1], label='EU forward Today', ls='dotted')\r\n plt.plot(EU_X_axis, dataForward.iloc[-260], label='EU forward 1 year ago', ls='dotted')\r\n plt.plot(EU_X_axis, dataForward.iloc[-1300], label='EU forward 5 years ago', ls='dotted')\r\n plt.xticks(rotation='vertical', size=8)\r\n plt.legend(loc=4, prop={'size': 7})\r\n plt.savefig(repositoryDirectory + \"/EUyieldCurves5years_\" + str(datetime.today().date()) + \".png\", dpi=199)\r\n plt.clf()\r\n dataSpotUS = pd.read_csv(rateFileDirectory + '/treasury_yields.csv', sep='\\t')\r\n US_X_axis = [0.08333333, 0.16666667, 0.25, 0.5, 1.,\r\n 2., 3., 5., 7., 10.,\r\n 20., 30.]\r\n plt.plot(US_X_axis, dataSpotUS.iloc[-1][1:], label='US treasury yield Today', marker='+')\r\n plt.plot(US_X_axis, dataSpotUS.iloc[-260][1:], label='US treasury yield 1 years ago', marker='+')\r\n plt.plot(US_X_axis, dataSpotUS.iloc[-1300][1:], label='US treasury yield 5 years ago', marker='+')\r\n plt.xticks(rotation='vertical', size=8)\r\n plt.legend(loc=4, prop={'size': 7})\r\n plt.savefig(repositoryDirectory + \"/USyieldCurves5years_\" + str(datetime.today().date()) + \".png\", dpi=199)\r\n publisher.tweet_image(\"Yield curves the last 5 years\",\r\n [repositoryDirectory + \"/EUyieldCurves5years_\" + str(datetime.today().date()) + \".png\",\r\n repositoryDirectory + \"/USyieldCurves5years_\" + str(datetime.today().date()) + \".png\"]\r\n )\r\n\r\n\r\ndef governmentRateUpdate(publisher):\r\n with open(pickledDataDirectoryPath + \"/Government.dat\", \"rb\") as f:\r\n object=pickle.load(f)\r\n object.checkGovernmentRateChange(publisher)\r\n\r\n\r\n\"\"\"\r\n MONTHLY UPDATES / PUBICATIONS\r\n\"\"\"\r\n\r\n\r\ndef monthlyLaborReport(publisher):\r\n if datetime.today().day <= 7:\r\n publisher.publish_tweet(\r\n \"Publication du Rapport mensuel sur l'emploi américain : \" + \"https://www.bls.gov/opub/mlr/\")\r\n\r\n\r\ndef getEconomicSentiment(publisher):\r\n Link = \"https://www.mql5.com/en/economic-calendar/european-union/zew-indicator-of-economic-sentiment\"\r\n import requests\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}\r\n result = requests.get(Link, headers=headers)\r\n data = soup(result.content,'html5lib')\r\n actualDate = data.find(\"td\", {\"id\": \"actualValueDate\"})['data-date'].replace(\" \", \"\")\r\n if not datetime.fromtimestamp(int(actualDate)/1000).date() == datetime.today().date() :\r\n return\r\n nextDate = data.find(\"td\",{\"id\":\"nextValueDate\"})['data-date'].replace(\" \",\"\")\r\n actualRow = data.findAll(\"div\" , 
{\"class\":\"event-table-history__item\"})[0]\r\n actualNumber = \"\".join(actualRow.find(\"div\" , {\"class\":\"event-table-history__actual green\"}).find(\"span\").text.split())\r\n previousNumber = \"\".join(actualRow.find(\"div\" , {\"class\":\"event-table-history__previous\"}).text.split())\r\n publisher.publish_tweet(\" \".join([\"Zew indicator of economic sentiment :\",actualNumber,\"(Previous result :\",previousNumber,\")\\nNext release\",str(datetime.fromtimestamp(int(nextDate)/1000).date())]))\r\n","repo_name":"youssk541/fnstatz","sub_path":"docs/updates.py","file_name":"updates.py","file_ext":"py","file_size_in_byte":22849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4147834321","text":"student_with_grade = {}\nfor _ in range(int(input())):\n name = input()\n score = float(input())\n student_with_grade[name] = score\nstudent_grade_list = [[i, j] for i, j in student_with_grade.items()]\nsorted_list = sorted(student_grade_list, key = lambda x:x[1])\nonly_grade = [i[1] for i in sorted_list]\nmylist = list(dict.fromkeys(only_grade))\nmin_grade = mylist[0]\nmylist.remove(min_grade)\nsecond_min_grade = mylist[0]\nlow_grd_std = []\nfor i in sorted_list:\n if i[1]==second_min_grade:\n low_grd_std.append(i)\nfinal_sorted_list = sorted(low_grd_std, key = lambda x:x[0])\nfor name in final_sorted_list:\n print(name[0])\n","repo_name":"mehedi1344/My-Module","sub_path":"python_practice/second_lowest_grade.py","file_name":"second_lowest_grade.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6954865455","text":"# Faulty Calculator\n# Design a calculator which will correctly solve all calculation except following one.\n# 45*3 = 555, 56+9 = 77, 56/6 = 4\n# take input as operator and two numbers from user and return the result\n\ndef calc(args):\n if args.x == 45 and args.y == 3 and args.o == 'mul':\n return 'Output is', 555\n elif args.x == 56 and args.y == 9 and args.o == 'add':\n return \"Output is\", 77\n elif args.x == 56 and args.y == 6 and args.o == 'div':\n return \"Output is\", 4\n elif args.x == int and args.y == int and args.o == 'sub':\n return \"Output is\", args.x - args.y\n\n elif args.o == 'mul':\n return args.x * args.y\n elif args.o == 'add':\n return args.x + args.y\n elif args.o == 'div':\n return args.x / args.y\n elif args.o == 'sub':\n return args.x - args.y\n else:\n return 'Something went wrong'\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--x', type=float, default=1.0,\n help=\"Enter first number, this is a utility for calculations. \" \\\n \"Please contact admin for more info\")\n parser.add_argument('--y', type=float, default=3.0,\n help=\"Enter second number, this is a utility for calculations.\"\n \"Please contact admin for more info\")\n parser.add_argument('--o', type=str, default='add',\n help=\"Enter operator, i.e 'add', 'sub', 'mul', 'div', this is a utility for calculations.\"\n \"Please contact admin for more info\")\n args = parser.parse_args()\n sys.stdout.write(str(calc(args)))\n","repo_name":"PriyanshuChandel/Python_Beginner_Exercises","sub_path":"Faulty-Calculator-CommandLineUtility.py","file_name":"Faulty-Calculator-CommandLineUtility.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3318014873","text":"# Merge sort . 
This is one of the efficient algorithms \r\n# O(nlogn) complexity\r\n\r\nX = [10,4,5,24,6,1,3, 7, 11, 2, 1]\r\n\r\n#In merge sort break the nodes into an array of elements \r\n\r\n# We use concept of recursion here \r\ndef mergesort(x):\r\n\tif len(x) > 1:\t\r\n\t\t#The first task is to break the array into two\r\n\r\n\t\t# find the mid point \r\n\r\n\t\tmidpoint = round(len(x)/2)\r\n\r\n\t\tright_array = x[:(midpoint)]\r\n\t\tleft_array = x[(midpoint):]\r\n\t \r\n\t\tprint(right_array)\r\n\t\tprint(left_array)\r\n\t\t# iteratively calling the function again\r\n\r\n\t\tmergesort(right_array)\r\n\t\tmergesort(left_array)\r\n\r\n\r\n\t\t# now we will be having the arrays\r\n\r\n\t\ti = j = k = 0\r\n\r\n # copy arrays into temporary variables\r\n\r\n\t\twhile i < len(left_array) and j < len(right_array):\r\n\t\t\tif left_array[i] < right_array[j]:\r\n\t\t\t\tx[k] = left_array[i]\r\n\t\t\t\ti+=1\r\n\t\t\telse:\r\n\t\t\t\tx[k] = right_array[j]\r\n\t\t\t\tj+=1\r\n\t\t\tk+=1\r\n\r\n # checking for left out elements\r\n\t\twhile i < len(left_array):\r\n\t\t\tx[k] = left_array[i]\r\n\t\t\ti+=1\r\n\t\t\tk+=1\r\n\t\twhile j < len(right_array):\r\n\t\t\tx[k] = right_array[j]\r\n\t\t\tj+=1\r\n\t\t\tk+=1\r\n\treturn x \r\n\r\n\r\nprint(mergesort(X))","repo_name":"bha123/DataStructures_Algorithms","sub_path":"Mergesort.py","file_name":"Mergesort.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25933581933","text":"import json\nimport copy\nimport numpy as np\n\n\ndef main():\n # List of all the annotation types that should be used\n ann_types = [\"keypoints\", \"foot_kpts\", \"face_kpts\", \"lefthand_kpts\", \"righthand_kpts\"]\n\n orig_file = \"../../data-mscoco/annotations_wholebody/coco_wholebody_train_v1.0.json\"\n new_file = \"../../data-mscoco/annotations/\"\n \"final_person_keypoints_train2017_wholebody_pifpaf_style.json\"\n\n # =============================================================================\n # orig_file = \"../../data-mscoco/annotations_wholebody/coco_wholebody_val_v1.0.json\"\n # new_file = \"../../data-mscoco/annotations/\"\n # \"final_person_keypoints_val2017_wholebody_pifpaf_style.json\"\n # =============================================================================\n\n handle_validity = True\n drop_attribute_list = [\"face_box\", \"face_kpts\", \"face_valid\", \"foot_kpts\", \"foot_valid\",\n \"lefthand_box\", \"lefthand_kpts\", \"lefthand_valid\",\n \"num_keypoints\", \"righthand_box\", \"righthand_kpts\",\n \"righthand_valid\", \"segmentation\"]\n with open(orig_file, 'r') as f:\n orig_data = json.load(f)\n new_data = copy.deepcopy(orig_data)\n new_data[\"annotations\"] = []\n discard_count = 0\n crowd_count = 0\n for ann_dict in orig_data[\"annotations\"]:\n if not all(x == 0 for x in ann_dict[\"keypoints\"]): # If all zero, only bbox\n new_dict = copy.deepcopy(ann_dict)\n for entry in drop_attribute_list:\n new_dict.pop(entry)\n ann = []\n for key in ann_types:\n ann = ann + ann_dict[key]\n if handle_validity:\n for jj, name in enumerate([\"face\", \"foot\", \"lefthand\", \"righthand\"]):\n if not ann_dict[name + \"_valid\"]:\n if name == \"face\":\n if np.any(np.array(ann[23 * 3:91 * 3])) > 0:\n print(\"face\")\n ann[23 * 3:91 * 3] = [0.0] * 68\n elif name == \"foot\":\n if np.any(np.array(ann[17 * 3:23 * 3])) > 0:\n print(\"foot\")\n ann[17 * 3:23 * 3] = [0.0] * 6\n elif name == \"lefthand\":\n if np.any(np.array(ann[91 * 3:112 * 3])) > 0:\n print(\"LH\")\n 
print(ann_dict[\"image_id\"])\n print(ann[91 * 3:112 * 3])\n ann[91 * 3:112 * 3] = [0.0] * 21\n elif name == \"righthand\":\n if np.any(np.array(ann[112 * 3:133 * 3])) > 0:\n print(\"RH\")\n ann[112 * 3:133 * 3] = [0.0] * 21\n else:\n raise Exception(\"Unknown\")\n if name != \"foot\":\n bb = ann_dict[name + \"_box\"]\n area = bb[2] * bb[3]\n im_id = ann_dict[\"image_id\"]\n old_id = ann_dict[\"id\"]\n mask_id = int(str(old_id) + '00' + str(jj))\n if area > 0.0:\n new_data[\"annotations\"].append({\n 'image_id': im_id,\n 'category_id': 1,\n 'iscrowd': 1,\n 'id': mask_id,\n 'area': area,\n 'bbox': bb,\n 'num_keypoints': 0,\n 'keypoints': [0.0] * 133 * 3,\n 'segmentation': []})\n crowd_count += 1\n new_dict[\"keypoints\"] = ann\n new_dict['num_keypoints'] = sum(x > 0 for x in ann[2::3])\n new_data[\"annotations\"].append(new_dict)\n else:\n discard_count += 1\n with open(new_file, 'w') as f:\n json.dump(new_data, f)\n print(\"\\nCreated a new json file with \" + str(len(new_data[\"annotations\"]))\n + \" annotations of which \" + str(crowd_count) + \" were crowd annotations and discarded \"\n + str(discard_count) + \" annotations from the original file.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"openpifpaf/openpifpaf","sub_path":"src/openpifpaf/plugins/wholebody/helper_scripts/Get_annotations_from_coco_wholebody.py","file_name":"Get_annotations_from_coco_wholebody.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","stars":1098,"dataset":"github-code","pt":"81"} +{"seq_id":"27916641948","text":"from pygments import highlight\nfrom pygments.lexers import get_lexer_for_filename,get_lexer_by_name\nfrom pygments.formatters import TerminalFormatter\nfrom sys import exit\nimport abc\nclass BaseFormatter(metaclass=abc.ABCMeta):\n \"\"\"\n\n handles the formatting of the code displayed\n\n\n \"\"\"\n def __init__(self,file,language=None,direct=False):\n self.language=language\n if direct:\n self.content = file\n else:\n self.file = file\n # for the time being reading the whole file in buffer\n\n def _read(self,file):\n try:\n with open(file,'r') as open_file:\n return open_file.read()\n\n except FileNotFoundError as e:\n print(e)\n exit(0)\n\n @abc.abstractmethod\n def format(self):\n \ttry:\n \t\treturn self.content\n \texcept:\n \t\treturn self._read(self.file)\n\n\nclass PlainFormatter(BaseFormatter):\n\n\n\tdef format(self):\n\t\treturn super().format()\n\n\nclass PygmentFormatter(BaseFormatter):\n \"\"\"\n\n handles the formatting of the code displayed\n\n\n \"\"\"\n def format(self):\n try:\n self.lexer = get_lexer_for_filename(self.file)\n except Exception as e:\n if self.language !=None:\n self.lexer = get_lexer_by_name(self.language)\n else:\n raise Exception(\"no suitable lexer found\")\n\n formatter = TerminalFormatter()\n\n return highlight(self._read(),self.lexer,formatter)\n\n\ndef initialize_color_pairs():\n curses.init_pair(1,curses.COLOR_RED,curses.COLOR_BLACK)\n curses.init_pair(2,curses.COLOR_GREEN,curses.COLOR_BLACK)\n \n","repo_name":"girishramnani/t3","sub_path":"t3/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12452092797","text":"import unittest\n\nfrom ...constants import * # NOQA\nfrom . 
import ArchiverTestCaseBase, RemoteArchiverTestCaseBase, ArchiverTestCaseBinaryBase, RK_ENCRYPTION, BORG_EXES\n\n\nclass ArchiverTestCase(ArchiverTestCaseBase):\n def test_transfer(self):\n def check_repo(repo_option):\n listing = self.cmd(repo_option, \"rlist\", \"--short\")\n assert \"arch1\" in listing\n assert \"arch2\" in listing\n listing = self.cmd(repo_option, \"list\", \"--short\", \"arch1\")\n assert \"file1\" in listing\n assert \"dir2/file2\" in listing\n self.cmd(repo_option, \"check\")\n\n self.create_test_files()\n repo1 = f\"--repo={self.repository_location}1\"\n repo2 = f\"--repo={self.repository_location}2\"\n other_repo1 = f\"--other-repo={self.repository_location}1\"\n\n self.cmd(repo1, \"rcreate\", RK_ENCRYPTION)\n self.cmd(repo1, \"create\", \"arch1\", \"input\")\n self.cmd(repo1, \"create\", \"arch2\", \"input\")\n check_repo(repo1)\n\n self.cmd(repo2, \"rcreate\", RK_ENCRYPTION, other_repo1)\n self.cmd(repo2, \"transfer\", other_repo1, \"--dry-run\")\n self.cmd(repo2, \"transfer\", other_repo1)\n self.cmd(repo2, \"transfer\", other_repo1, \"--dry-run\")\n check_repo(repo2)\n\n\nclass RemoteArchiverTestCase(RemoteArchiverTestCaseBase, ArchiverTestCase):\n \"\"\"run the same tests, but with a remote repository\"\"\"\n\n\n@unittest.skipUnless(\"binary\" in BORG_EXES, \"no borg.exe available\")\nclass ArchiverTestCaseBinary(ArchiverTestCaseBinaryBase, ArchiverTestCase):\n \"\"\"runs the same tests, but via the borg binary\"\"\"\n","repo_name":"0xallie/borg","sub_path":"src/borg/testsuite/archiver/transfer_cmd.py","file_name":"transfer_cmd.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"43147190609","text":"import configparser\nimport pandas as pd\nimport os\nimport uuid\nfrom pyspark.sql import SparkSession\n\n\ndef spark_generator():\n \"\"\"\n create a spark session\n :return:spark session\n \"\"\"\n spark = SparkSession.builder. 
\\\n config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .config(\"spark.hadoop.fs.s3a.impl\", \"org.apache.hadoop.fs.s3a.S3AFileSystem\") \\\n .config(\"spark.driver.memory\", \"15g\") \\\n .enableHiveSupport().getOrCreate()\n spark.conf.set(\"spark.sql.execution.arrow.enabled\", \"true\")\n return spark\n\n\ndef immigration_data(year, month):\n \"\"\"\n create parquet file of immigration data group by year and month\n :param year: group by which year\n :param month: group by which month (alphabetic abbreviation)\n :return:parquet file of immigration data\n \"\"\"\n i94 = pd.read_sas(('i94_' + str(month) + str(year) + '_sub.sas7bdat'), 'sas7bdat',\n encoding=\"ISO-8859-1\").drop_duplicates()\n i94['id_'] = pd.Series([str(uuid.uuid1()) for each in range(len(i94))])\n i94['arrival_date'] = pd.to_timedelta(i94['arrdate'], unit='D') + pd.Timestamp('1960-1-1')\n i94 = spark.createDataFrame(i94)\n i94.createOrReplaceTempView('i94')\n sql = \"\"\"SELECT i94yr AS year,i94mon AS month,i94cit AS citizenship,\n i94res AS resident,i94port AS port,\n arrival_date,i94mode AS mode,\n i94addr AS us_state,depdate AS depart_date,\n i94bir AS age,i94visa visa_category,\n dtadfile AS date_added,visapost AS visa_issued_by,\n occup AS occupation,entdepa AS arrival_flag,\n entdepd AS depart_flag,entdepu AS update_flag,\n matflag AS match_arrival_depart_flag,\n biryear AS birth_year,dtaddto AS allowed_date,\n gender,insnum AS ins_number,airline,\n admnum AS admission_number,\n fltno AS flight_no,visatype,id_\n FROM i94;\n \"\"\"\n i94_df = spark.sql(sql)\n i94_df.write.mode('overwrite') \\\n .partitionBy('month', 'year') \\\n .format('parquet') \\\n .option(\"compression\", \"gzip\") \\\n .save('parquet_data/' + str(month) + '_' + str(year) + '.parquet')\n print('i94 parquet generation complete.-' + str(month) + '_' + str(year))\n\n\ndef airport():\n \"\"\"\n create parquet file of airport data\n :return: parquet file of airport data\n \"\"\"\n airport_codes_url = 's3://srk-data-eng-capstone/airport-codes_csv.csv'\n airport_codes = pd.read_csv(airport_codes_url)\n airport_codes = spark.createDataFrame(airport_codes)\n airport_codes.createOrReplaceTempView('airports')\n sql = \"\"\"SELECT ident, type, name, elevation_ft, continent, \n iso_country, iso_region, municipality, gps_code, iata_code AS airport_code, coordinates\n FROM airports WHERE iata_code IS NOT NULL\n UNION\n SELECT ident, type, name, elevation_ft, continent,\n iso_country, iso_region, municipality, gps_code, local_code AS airport_code, coordinates\n FROM airports WHERE local_code IS NOT NULL\"\"\"\n airports = spark.sql(sql)\n airports.write.mode('overwrite') \\\n .format('parquet') \\\n .option(\"compression\", \"gzip\") \\\n .save('parquet_data/airports.parquet')\n print('Airport parquet generation complete.')\n\n\ndef us_cities():\n \"\"\"\n create parquet file of US cities\n :return: parquet file of US cities\n \"\"\"\n us_city_demographics_url = 's3://srk-data-eng-capstone/us-cities-demographics.csv'\n us_city_demographics = pd.read_csv(us_city_demographics_url, sep=';')\n us_city_demographics = spark.createDataFrame(us_city_demographics)\n us_city_demographics.createOrReplaceTempView('us_cities')\n sql = \"\"\"SELECT city, `Median Age` AS median_age, `Male Population` AS male_population,\n `Female Population` AS female_population, `Total Population` AS population,\n `Number of Veterans` AS num_veterans, `Foreign-born` AS foreign_born, `Average Household Size` AS avg_household_size,\n `State Code` AS state, race, 
count\n FROM us_cities\"\"\"\n us_cities = spark.sql(sql)\n us_cities.write.mode('overwrite') \\\n .format('parquet') \\\n .option('compression', 'gzip') \\\n .save('parquet_data/us_cities.parquet')\n print('US cities parquet generation complete.')\n\n\ndef mapping(names):\n \"\"\"\n create parquet files for mapping tables\n :param names:mapping table name\n :return:parquet files of mapping tables\n \"\"\"\n origin = open('mappings/{}.txt'.format(names), 'r')\n code = []\n name = []\n for each in origin:\n line = \" \".join(each.split())\n try:\n code.append(int(line[:line.index('=')]))\n except:\n code.append(line[1:line.index('=') - 1])\n name.append(line[line.index('=') + 2:-1])\n origin.close()\n col_code = names + '_code'\n col_name = names + '_name'\n df = pd.DataFrame(list(zip(code, name)), columns=[col_code, col_name])\n df = spark.createDataFrame(df)\n df.write.mode('overwrite') \\\n .format('parquet') \\\n .option('compression', 'gzip') \\\n .save('parquet_data/' + names + '.parquet')\n print(names + ' parquet generation complete.')\n\n\ndef upload_files(filename):\n \"\"\"\n upload parquet file to S3\n :param filename: parquet filename\n :return:\n \"\"\"\n config = configparser.ConfigParser()\n config.read('iam.cfg')\n os.environ['AWS_ACCESS_KEY_ID'] = config['AWS_CREDS']['AWS_ACCESS_KEY_ID']\n os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS_CREDS']['AWS_SECRET_ACCESS_KEY']\n\n os.system('aws s3 cp parquet_data/{}.parquet s3://i94-backup --recursive'.format(filename))\n print(filename + ' is uploaded to bucket i94-backup')\n\n\ndef main():\n spark=spark_generator()\n immigration_data(16,'jan')\n airport()\n us_cities()\n mapping_list = ['country','us_states','visacode','mode']\n for each in mapping_list:\n mapping(each)\n uploading_list = ['jan_16','airports','country','mode','us_cities','us_states','visacode']\n for each in uploading_list:\n upload_files(each)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"srkucd/data_engineering_capstone","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22151633097","text":"# Counter\n\nmy_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nfor num in my_list:\n total = sum(my_list)\n\nprint(total)\n\n# Andrei's Answer\n\ncounter = 0\nfor item in my_list:\n counter = counter + item\n\nprint(counter)","repo_name":"IanGrin/ZTM.io","sub_path":"Python/Exercises/Exercise - Tricky Counter.py","file_name":"Exercise - Tricky Counter.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"410896903","text":"import sys\nfrom pandas import read_csv\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n\ndef main():\n filename = sys.argv[1]\n df = read_csv(filename, header=None)\n df.columns = ['time','message_identifier','sender','recipients','topic','mode']\n \n print('(1/3) Processing Question 1..........')\n top_senders = fn_question_1(df, 10)\n \n print('(2/3) Processing Question 2..........')\n fn_question_2(df, top_senders, '2000-06-01','2001-06-01')\n \n print('(3/3) Processing Question 3..........')\n fn_question_3(df, top_senders, '2000-06-01','2001-06-01')\n\ndef fn_question_1(df, number_of_top_senders):\n \"\"\"\n Algorithm\n Convert UNIX time to date time\n Create separate dataframe of senders list and use groupby to find the count \n Create a 
separate dataframe for receipients list and split the list in separate columns\n move all the receipients columns in one column using stack function\n Group the receipients using groupby and calculate count \n Merge both dataframes, using inner, left and right joins\n returns Top X senders list\n \n :param pandas dataframe with raw data\n :param Number of top senders to be returned by function\n :return list of top senders\n \"\"\"\n df['Date_Time'] = df['time'].apply(change_time_format)\n \n df_sender= df['sender'].reset_index()\n df_sender = df_sender.groupby(['sender']).count().reset_index()\n df_sender.rename(columns={'index':'sent_count'},inplace=True)\n \n df_recipients = df['recipients'].str.split(pat='|',expand=True)\n df_recipients = df_recipients.stack().reset_index()\n df_recipients.columns = ['level_0','level_1','recipient']\n df_recipients = df_recipients.groupby(['recipient']).count().reset_index()\n df_recipients = df_recipients[['recipient','level_0']]\n df_recipients.rename(columns={'level_0':'received_count'},inplace=True)\n \n df_inner = pd.merge(df_sender,df_recipients,how='inner',left_on='sender',right_on='recipient')\n df_inner.drop(columns=['recipient'],inplace=True)\n df_inner.rename(columns={'sender':'person'},inplace=True) \n \n df_left_only = pd.merge(df_sender,df_recipients,how='left',left_on='sender',right_on='recipient', indicator=True) \\\n .query(\"_merge == 'left_only'\") \\\n .drop('_merge',1)\n df_left_only.drop(columns=['recipient'],inplace=True)\n df_left_only.rename(columns={'sender':'person'},inplace=True)\n \n df_right_only = pd.merge(df_sender,df_recipients,how='right',left_on='sender',right_on='recipient', indicator=True) \\\n .query(\"_merge == 'right_only'\") \\\n .drop('_merge',1)\n df_right_only.drop(columns=['sender'],inplace=True)\n df_right_only.rename(columns={'recipient':'person'},inplace=True)\n new_order = ['person','sent_count','received_count']\n df_right_only = df_right_only[new_order] \n \n df1 = df_inner.append([df_left_only,df_right_only])\n df1.fillna(0,inplace=True)\n df1.sort_values(by='sent_count',ascending=False,inplace=True)\n top_senders = df1['person'].head(int(number_of_top_senders)).tolist()\n df1.to_csv('question1.csv')\n \n return top_senders\n\ndef fn_question_2(df, top_senders, from_date, to_date):\n \"\"\"\n Algorithm\n Get the list of top senders from previous function\n Create dataframe with senders list,date time.\n Group the dataset by sender, data time and calculate count \n Filter the dataset based on senders list and date range\n Use pivot _table to aggregate the dataset and plot a graph\n \n :param pandas dataframe with raw data\n :param list of top senders\n :param From data, Duration for which data has to be analysed\n :param To date, Duration for which data has to be analysed\n \"\"\"\n from_date = pd.to_datetime(from_date).date()\n to_date = pd.to_datetime(to_date).date() \n df['Date'] = df['Date_Time'].apply(convert_time_to_date) \n df_sender_date= df[['sender','Date']].reset_index()\n df_sender_date = df_sender_date.groupby(['sender','Date']).count().reset_index() \n df2 = df_sender_date[df_sender_date.sender.isin(top_senders)] \n df2 = df2.loc[(df2['Date']>from_date) & (df2['Date']from_date) & (df3['Date'] 1:\n # 一度に傘が借りられすぎた\n print(\n \"Error: Number of umbrellas which are lent are too much at once a time.\"\n )\n self.buzzer_controller.inpulse(sound_time=0.05)\n continue\n # 借りるのと同時に返していないかチェック\n if len(now_umbrella_id_set - umbrella_id_set) > 0:\n # 借りるのと同時に別の傘が返された\n print(\"Error: The umbrella 
was returned as soon as it was borrowed.\")\n                print(now_umbrella_id_set)\n                print(umbrella_id_set)\n                self.buzzer_controller.inpulse(sound_time=0.05)\n                continue\n            if lend_umbrella_id_set != set():\n                # exactly one umbrella was taken out\n                return lend_umbrella_id_set\n\n    def rent_one(self):\n        \"\"\"Lend out an umbrella.\n\n        Returns:\n            str: ID of the umbrella that was lent out\n        \"\"\"\n        # refresh the current RFID state\n        self.update_all_umbrella_id()\n        # take a copy of the current state\n        now_umbrella_holder_list = copy.copy(self.umbrella_holder_list)\n        print(\"Umbrella holder copied\")\n\n        # release the locks\n        self.unlock_all()\n        # determine which umbrella was taken\n        lent_umbrella_id_set = self.__check_umbrella_was_taken(now_umbrella_holder_list)\n        # re-lock only the slots that were not lent out\n        self.lock_least_umbrellas()\n        return lent_umbrella_id_set.pop()\n\n    def check_umbrella_was_returned(self):\n        \"\"\"Check whether an RFID tag has been inserted.\n\n        Returns:\n            UmbrellaHolder: the holder the RFID tag was inserted into\n        \"\"\"\n        # from the initial holder list, build the set of holders that contain an umbrella\n        umbrella_id_set = self.__create_umbrella_id_set(self.umbrella_holder_list)\n        self.update_all_umbrella_id()\n        returned_umbrella_id_set = set()\n        while True:\n            now_umbrella_id_set = self.__create_umbrella_id_set(\n                self.umbrella_holder_list\n            )\n            returned_umbrella_id_set = now_umbrella_id_set - umbrella_id_set\n            if len(returned_umbrella_id_set) > 1:\n                # multiple RFID tags were inserted at the same time\n                print(\n                    \"Error: Number of umbrellas which are returned are too much at once a time.\"\n                )\n                self.buzzer_controller.inpulse(sound_time=0.05)\n                continue\n            break\n        if len(returned_umbrella_id_set) == 0:\n            # a set length of 0 means nothing was inserted, so return None\n            return None\n        returned_umbrella_id = returned_umbrella_id_set.pop()\n        # find the matching umbrella holder with a filter\n        returned_umbrella_holder = filter(\n            lambda umbrella_holder: umbrella_holder.rfid == returned_umbrella_id,\n            self.umbrella_holder_list,\n        )\n        # a filter object is awkward to work with, so convert it to a list\n        returned_umbrella_holder_list = list(returned_umbrella_holder)\n        # take the first holder from the list\n        returned_umbrella_holder = returned_umbrella_holder_list[0]\n        # lock it\n        returned_umbrella_holder.lock()\n        return returned_umbrella_holder\n\n    def unlock_umbrella_for_failsafe(self, umbrella_holder):\n        \"\"\"Hand the umbrella back to the user when the return process fails because of some system fault.\n\n        Args:\n            umbrella_holder (UmbrellaHolder): the umbrella holder\n        \"\"\"\n        # the umbrella is already locked, so unlock it and wait until it is taken out\n        umbrella_holder.unlock()\n        while umbrella_holder.rfid is not None:\n            self.buzzer_controller.alert(sound_time=0.08, interval_time=0.08)\n            # TODO display some kind of warning\n            umbrella_holder.update_rfid()\n","repo_name":"HyodaKazuaki/sasoka-prototype","sub_path":"Manager/Umbrella/UmbrellaManager.py","file_name":"UmbrellaManager.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"672857263","text":"\"\"\"\n1- we make an array with numpy methods - using arange\n2- we save those arrays into a file - using savez\nwe will give the file the name .npz\nwe give every array a name: x_axis, y_axis\n3- delete the variables x and y in order to learn how to load the information with the load method in numpy\n3. del x, y\n4- load the npz file into our workspace again\n5- after we have loaded our arrays back again, we can load them into their variables\n\"\"\"\n\nfrom unittest.mock import MagicMixin\nimport numpy as np\n\ndef main():\n\n    x = np.arange(1, 11)\n    # print (x)\n\n    y = x * 2\n    # print (y)\n\n    \"\"\"\n    then we save those arrays into a file - using savez\n    we will give the file the name .npz\n    we give every array a name: x_axis, y_axis\n    \"\"\"\n\n    np.savez(\"x_y_array.npz\", x_axis = x, y_axis = y)\n\n    \"\"\"\n    now - i delete the variable x and y in order to 
leran how to load the information from load method in numpy\n 1- del x, y\n 2- whos - show me which variable are the function contains \n (in order to make sure our workspace empty and now we can load the file into the workspace)\n \"\"\"\n del x, y\n \n \"\"\"\n now we load the npz file into our workspace again\n \"\"\"\n load_xy = np.load (\"x_y_array.npz\")\n print (load_xy.files)\n\n\n \"\"\"\n after we loaded our array back again, we can load them into their vriable\n \"\"\"\n x = load_xy[\"x_axis\"]\n y = load_xy[\"y_axis\"]\n print(x)\n print(y)\n\nif __name__ == \"__main__\":\n main()","repo_name":"shirepsh/homeWork_25_08","sub_path":"numpy_func.py","file_name":"numpy_func.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31094866101","text":"import requests\nimport json\nfrom cisco_support import utils\n\nclass Case:\n\n __headers = None\n __verify = None\n __proxies = None\n\n def __init__(self, key: str, secret: str, verify: bool = True, proxies: dict = None) -> None:\n\n self.__verify = verify\n self.__proxies = proxies\n\n token = utils.getToken(key, secret, verify, proxies) \n\n self.__headers = {\n 'Authorization': f'Bearer {token}',\n 'Accept': 'application/json',\n }\n\n def getCaseSummary(self, case_ids: list, sort_by: str = 'UPDATED_DATE') -> dict:\n params = {\n 'sort_by': sort_by\n }\n\n case_ids = ','.join(case_ids)\n\n url = f'https://api.cisco.com/case/v3/cases/case_ids/{case_ids}'\n r = requests.get(url=url, headers=self.__headers, params=params, verify=self.__verify, proxies=self.__proxies)\n\n print(r.content)\n\n return r.json()\n\n def getCaseDetails(self, case_id: str) -> dict:\n params = {}\n\n url = f'https://api.cisco.com/case/v3/cases/details/case_id/{case_id}'\n r = requests.get(url=url, headers=self.__headers, params=params, verify=self.__verify, proxies=self.__proxies)\n\n return r.json()\n\n def getByContractID(self, contract_ids: list, date_created_from: str, date_created_to: str, status_flag: str = 'O', sort_by: str = 'UPDATED_DATE', page_index: int = 1) -> dict:\n params = {\n 'date_created_from': date_created_from,\n 'date_created_to': date_created_to,\n 'sort_by': sort_by,\n 'status_flag': status_flag,\n 'page_index': page_index\n }\n\n contract_ids = ','.join(contract_ids)\n\n url = f'https://api.cisco.com/case/v3/cases/contracts/contract_ids/{contract_ids}'\n r = requests.get(url=url, headers=self.__headers, params=params, verify=self.__verify, proxies=self.__proxies)\n\n return r.json()\n\n def getByUserID(self, user_ids: list, date_created_from: str = None, date_created_to: str = None, status_flag: str = 'O', sort_by: str = 'UPDATED_DATE', page_index: int = 1) -> dict:\n params = {\n 'date_created_from': date_created_from,\n 'date_created_to': date_created_to,\n 'sort_by': sort_by,\n 'status_flag': status_flag,\n 'page_index': page_index\n }\n\n user_ids = ','.join(user_ids)\n\n url = f'https://api.cisco.com/case/v3/cases/users/user_ids/{user_ids}'\n r = requests.get(url=url, headers=self.__headers, params=params, verify=self.__verify, proxies=self.__proxies)\n\n return r.json()","repo_name":"rothdennis/cisco_support","sub_path":"cisco_support/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"24366831786","text":"import os\r\nimport uuid\r\nimport torch\r\nimport numpy as np\r\nimport sys\r\n\r\nfrom typing 
import Union, List\r\nfrom ast import literal_eval\r\nfrom scenarios import ScenarioSetting, RescueTheGeneralScenario\r\nfrom strategies import RTG_ScriptedEnv\r\nfrom algorithms import MarlAlgorithm, PMAlgorithm, MultiAgentVecEnv\r\n\r\nimport strategies\r\nimport rescue\r\nimport utils\r\n\r\nclass Config():\r\n \"\"\" Class to hold config files\"\"\"\r\n\r\n def __init__(self):\r\n self.log_folder = str()\r\n self.device = str()\r\n self.epochs = int()\r\n self.model = str()\r\n self.parallel_envs = int()\r\n self.algo_params = dict()\r\n self.run = str()\r\n self.force_cpu = bool()\r\n self.script_blue_team = str()\r\n self.export_video = bool()\r\n self.train_scenarios = list()\r\n self.eval_scenarios = list()\r\n self.amp = bool()\r\n self.micro_batch_size: Union[str, int] = str()\r\n self.n_steps = int()\r\n self.export_rollout = bool()\r\n self.test_epochs = int()\r\n self.save_model = str()\r\n self.prediction_mode = str()\r\n self.deception_bonus = tuple()\r\n self.split_policy = bool()\r\n self.nan_check = bool()\r\n self.use_global_value = bool()\r\n self.seed = int()\r\n self.restore = bool()\r\n self.cmd_args = str()\r\n\r\n self.verbose = int()\r\n\r\n def __str__(self):\r\n\r\n # custom one looks better and will evaluate ok using literal_eval\r\n lines = []\r\n for k,v in vars(self).items():\r\n key_string = f\"'{k}':\"\r\n if type(v) is str: # wrap strings in quotes\r\n v = f\"'{v}'\"\r\n lines.append(f\"{key_string:<20}{v},\")\r\n return \"{\\n\"+(\"\\n\".join(lines))+\"\\n}\"\r\n\r\n # d = {}\r\n # for k,v in vars(self).items():\r\n # if type(v) is list and len(v) > 0 and type(v[0]) is ScenarioSetting:\r\n # v = [[scenario.scenario_name] + scenario.strategies for scenario in v]\r\n # d[k] = v\r\n # return json.dumps(d, indent=4)\r\n\r\n def setup(self, args:dict):\r\n\r\n self.cmd_args = \" \".join(sys.argv)\r\n\r\n config_vars = set(k for k,v in vars(self).items())\r\n\r\n if type(args['algo_params']) is str:\r\n args['algo_params'] = literal_eval(args['algo_params'])\r\n\r\n # setup config from command line args\r\n # most of these just get copied across directly\r\n for arg_k,arg_v in args.items():\r\n # check if this matches a config variable\r\n if arg_k in config_vars:\r\n if type(arg_v) is str:\r\n # map all strings to lower_case\r\n arg_v = arg_v.lower()\r\n vars(self)[arg_k] = arg_v\r\n\r\n self.uuid = uuid.uuid4().hex[-8:]\r\n\r\n if 'log_folder' not in args:\r\n if args.get('mode','') == \"evaluate\":\r\n self.log_folder = f\"run/{args['run']}/evaluate [{self.uuid}]\"\r\n else:\r\n self.log_folder = f\"run/{args['run']} [{self.uuid}]\"\r\n\r\n rescue.LOG_FILENAME = self.log_folder\r\n\r\n # work out the device\r\n if self.device == \"auto\":\r\n self.device = \"cuda\" if torch.has_cuda else \"cpu\"\r\n\r\n if type(self.deception_bonus) == str:\r\n self.deception_bonus = literal_eval(str(self.deception_bonus))\r\n if type(self.deception_bonus) in [list]:\r\n self.deception_bonus = tuple(self.deception_bonus)\r\n\r\n if type(self.deception_bonus) in [float, int]:\r\n self.deception_bonus = tuple([self.deception_bonus] * 3)\r\n\r\n # setup the scenarios... 
these are a bit complex now due to the scripted players\r\n args['eval_scenarios'] = args.get('eval_scenarios', args.get('train_scenarios'))\r\n self.train_scenarios = ScenarioSetting.parse(args['train_scenarios'])\r\n self.eval_scenarios = ScenarioSetting.parse(args['eval_scenarios'])\r\n\r\n if self.seed < 0:\r\n self.seed = np.random.randint(0, 99999999)\r\n\r\ndef make_algo(vec_env: MultiAgentVecEnv, config:Config, model_name=None):\r\n\r\n algo_params = config.algo_params.copy()\r\n\r\n algo_params[\"model_name\"] = model_name or config.model\r\n\r\n algorithm = PMAlgorithm(\r\n vec_env,\r\n device=config.device,\r\n amp=config.amp,\r\n export_rollout=config.export_rollout,\r\n micro_batch_size=config.micro_batch_size,\r\n n_steps=config.n_steps,\r\n use_global_value_module=config.use_global_value,\r\n prediction_mode=config.prediction_mode,\r\n deception_bonus=config.deception_bonus,\r\n split_policy=config.split_policy,\r\n verbose=config.verbose >= 2, **algo_params,\r\n nan_check=config.nan_check,\r\n )\r\n\r\n algorithm.log_folder = config.log_folder\r\n\r\n print(f\" -model created using batch size of {algorithm.batch_size} and mini-batch size of {algorithm.mini_batch_size}\")\r\n\r\n return algorithm\r\n\r\n\r\ndef evaluate_model(algorithm: MarlAlgorithm, eval_scenario, sub_folder, trials=100):\r\n \"\"\"\r\n Evaluate given model in given environment.\r\n :param algorithm:\r\n :param trials:\r\n :return:\r\n \"\"\"\r\n\r\n # note, this is the evaluation used by train.py, merge this with arean's evaluation script\r\n\r\n # run them all in parallel at once to make sure we get exactly 'trials' number of environments\r\n os.makedirs(sub_folder, exist_ok=True)\r\n vec_env = make_env(eval_scenario, trials, name=\"eval\", log_path=sub_folder, )\r\n env_obs = vec_env.reset()\r\n rnn_states = algorithm.get_initial_rnn_state(vec_env.num_envs)\r\n env_terminals = np.zeros([len(rnn_states)], dtype=np.bool)\r\n vec_env.run_once = True\r\n\r\n # play the game...\r\n results = [(0, 0, 0) for _ in range(trials)]\r\n while not all(env_terminals):\r\n\r\n with torch.no_grad():\r\n roles = vec_env.get_roles()\r\n model_output, new_rnn_states = algorithm.forward(\r\n obs=torch.from_numpy(env_obs),\r\n rnn_states=rnn_states,\r\n roles=torch.from_numpy(roles)\r\n )\r\n rnn_states[:] = new_rnn_states\r\n\r\n log_policy = model_output[\"log_policy\"].detach().cpu().numpy()\r\n actions = utils.sample_action_from_logp(log_policy)\r\n\r\n env_obs, env_rewards, env_terminals, env_infos = vec_env.step(actions)\r\n\r\n # look for finished games\r\n for i, env in enumerate(vec_env.games):\r\n if env.round_outcome != \"\":\r\n results[i] = env.round_team_scores\r\n\r\n # collate results\r\n red_score = np.mean([r for r, g, b in results])\r\n green_score = np.mean([g for r, g, b in results])\r\n blue_score = np.mean([b for r, g, b in results])\r\n\r\n # make sure results have be written to env log\r\n rescue.flush_logs()\r\n\r\n return red_score, green_score, blue_score\r\n\r\n\r\ndef make_env(\r\n scenarios: Union[List[ScenarioSetting], ScenarioSetting, str],\r\n parallel_envs:int,\r\n log_path=None,\r\n name=\"env\"\r\n):\r\n \"\"\"\r\n Creates a vectorized environment from given scenario specifications\r\n :param scenarios: Either a string: e.g. 
\"red2\", in which case a single scenario with no scripting is used, or a\r\n single ScriptedScenario, or a list of ScriptedScenarios\r\n :param parallel_envs: Number of times to duplicate the environment(s)\r\n :param name:\r\n :return:\r\n \"\"\"\r\n\r\n # for convenience we allow non-list input, and string format\r\n if isinstance(scenarios, ScenarioSetting):\r\n scenarios = [scenarios]\r\n\r\n if isinstance(scenarios, str):\r\n scenarios = ScenarioSetting.parse(scenarios)\r\n\r\n env_functions = []\r\n for _ in range(parallel_envs):\r\n for index, scenario_setting in enumerate(scenarios):\r\n # convert strategies names to strategy functions\r\n strats = []\r\n for strategy in scenario_setting.strategies:\r\n if strategy is not None:\r\n strats.append(strategies.register[strategy])\r\n else:\r\n strats.append(None)\r\n\r\n if log_path is None:\r\n log_file = None\r\n else:\r\n log_file = os.path.join(log_path, f\"env_{index}.csv\")\r\n\r\n make_env_fn = lambda _strats=tuple(strats), _name=name, _scenario_setting=scenario_setting, _log_file=log_file: \\\r\n RTG_ScriptedEnv(\r\n scenario_name=_scenario_setting.scenario_name, name=_name,\r\n red_strategy=_strats[0],\r\n green_strategy=_strats[1],\r\n blue_strategy=_strats[2],\r\n log_file=_log_file\r\n )\r\n\r\n env_functions.append(make_env_fn)\r\n\r\n vec_env = MultiAgentVecEnv(env_functions)\r\n\r\n return vec_env\r\n","repo_name":"maitchison/RTG","sub_path":"support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":8780,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"29553784108","text":"\"\"\"\r\nCopyright 2020 The OneFlow Authors. All rights reserved.\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\nimport unittest\r\nfrom collections import OrderedDict\r\n\r\nimport numpy as np\r\n\r\nimport oneflow.experimental as flow\r\nfrom test_util import GenArgList\r\n\r\n\r\ndef _test_argmax_aixs_negative(test_case, device):\r\n input = flow.Tensor(\r\n np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),\r\n )\r\n axis = -1\r\n of_out = flow.argmax(input, dim=axis)\r\n np_out = np.argmax(input.numpy(), axis=axis)\r\n test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))\r\n\r\n\r\ndef _test_tensor_argmax(test_case, device):\r\n input = flow.Tensor(\r\n np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),\r\n )\r\n axis = 0\r\n of_out = input.argmax(dim=axis)\r\n np_out = np.argmax(input.numpy(), axis=axis)\r\n test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))\r\n test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))\r\n\r\n\r\ndef _test_argmax_axis_postive(test_case, device):\r\n input = flow.Tensor(\r\n np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),\r\n )\r\n axis = 1\r\n of_out = flow.argmax(input, dim=axis)\r\n np_out = np.argmax(input.numpy(), axis=axis)\r\n 
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))\r\n\r\n\r\ndef _test_argmax_keepdims(test_case, device):\r\n input = flow.Tensor(\r\n np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),\r\n )\r\n axis = 0\r\n of_out = input.argmax(axis, True)\r\n np_out = np.argmax(input.numpy(), axis=axis)\r\n np_out = np.expand_dims(np_out, axis=axis)\r\n\r\n test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))\r\n test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))\r\n\r\n\r\ndef _test_argmax_dim_equal_none(test_case, device):\r\n input = flow.Tensor(\r\n np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),\r\n )\r\n of_out = input.argmax()\r\n np_out = np.argmax(input.numpy().flatten(), axis=0)\r\n test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))\r\n\r\n\r\n@unittest.skipIf(\r\n not flow.unittest.env.eager_execution_enabled(),\r\n \".numpy() doesn't work in lazy mode\",\r\n)\r\nclass TestArgmax(flow.unittest.TestCase):\r\n def test_argmax(test_case):\r\n arg_dict = OrderedDict()\r\n arg_dict[\"test_fun\"] = [\r\n _test_argmax_aixs_negative,\r\n _test_tensor_argmax,\r\n _test_argmax_axis_postive,\r\n _test_argmax_keepdims,\r\n _test_argmax_dim_equal_none,\r\n ]\r\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\r\n for arg in GenArgList(arg_dict):\r\n arg[0](test_case, *arg[1:])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n","repo_name":"wanghongsheng01/framework_enflame","sub_path":"oneflow/python/test/modules/test_argmax.py","file_name":"test_argmax.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27134847245","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass TextCNN(nn.Module):\n def __init__(self, emb_dim, dim_channel, kernel_wins, dropout_rate, args):\n super(TextCNN, self).__init__()\n self.args = args\n # Convolutional Layers with different window size kernels\n self.convs = nn.ModuleList([nn.Conv2d(1, dim_channel, (w, emb_dim)) for w in kernel_wins])\n # Dropout layer\n self.dropout = nn.Dropout(dropout_rate)\n \n def forward(self, \n emb_x,\n attention_mask):\n attention_mask = attention_mask.unsqueeze(-1).expand(emb_x.shape[0], emb_x.shape[1], emb_x.shape[2])\n emb_x = emb_x * attention_mask \n emb_x = emb_x.unsqueeze(1)\n con_x = [conv(emb_x) for conv in self.convs]\n pool_x = [F.max_pool1d(x.squeeze(-1), x.size()[2]) for x in con_x]\n fc_x = torch.cat(pool_x, dim=1)\n fc_x = fc_x.squeeze(-1)\n fc_x = self.dropout(fc_x)\n return fc_x\n\nclass ClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n def __init__(self, hidden_dim):\n super().__init__()\n self.dense = nn.Linear(hidden_dim, hidden_dim)\n self.Dropout = nn.Dropout(0.1)\n self.out_proj = nn.Linear(hidden_dim, 155)\n \n self.func_dense = nn.Linear(hidden_dim, hidden_dim)\n self.func_out_proj = nn.Linear(hidden_dim, 2)\n \n def forward(self, hidden):\n x = self.Dropout(hidden)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.Dropout(x)\n x = self.out_proj(x)\n \n func_x = self.Dropout(hidden)\n func_x = self.func_dense(func_x)\n func_x = torch.tanh(func_x)\n func_x = self.Dropout(func_x)\n func_x = self.func_out_proj(func_x)\n return x.squeeze(-1), func_x\n\nclass Model(nn.Module): \n def __init__(self, roberta, tokenizer, args, hidden_dim=768, num_labels=155):\n super(Model, 
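# (Shape notes for the TextCNN above, added for clarity and assuming emb_x is
#  [batch, seq_len, emb_dim]: unsqueeze(1) -> [B, 1, L, E]; each
#  nn.Conv2d(1, C, (w, E)) -> [B, C, L-w+1, 1]; squeeze(-1) followed by
#  F.max_pool1d over the whole remaining length -> [B, C, 1]; torch.cat over
#  the kernel sizes plus the final squeeze(-1) -> [B, C * len(kernel_wins)],
#  which is the feature vector the dropout layer sees.)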
self).__init__()\n self.word_embedding = roberta.embeddings.word_embeddings\n self.birnn = nn.LSTM(768, 768, num_layers=2, batch_first=True, bidirectional=True)\n self.tokenizer = tokenizer\n self.args = args\n # CLS head\n self.classifier = ClassificationHead(hidden_dim=768)\n\n def forward(self, input_ids_with_pattern, statement_mask, labels=None, func_labels=None):\n statement_mask = statement_mask[:, :self.args.num_labels]\n if self.training:\n embed = self.word_embedding(input_ids_with_pattern)\n inputs_embeds = torch.amax(embed, dim=2)\n inputs_embeds = inputs_embeds[:, :self.args.num_labels, :] \n out, (hn, cn) = self.birnn(inputs_embeds)\n rep = hn[-1] \n logits, func_logits = self.classifier(rep)\n loss_fct = nn.CrossEntropyLoss()\n statement_loss = loss_fct(logits, labels)\n loss_fct_2 = nn.CrossEntropyLoss()\n func_loss = loss_fct_2(func_logits, func_labels)\n return statement_loss, func_loss\n else:\n embed = self.word_embedding(input_ids_with_pattern)\n inputs_embeds = torch.amax(embed, dim=2)\n inputs_embeds = inputs_embeds[:, :self.args.num_labels, :]\n out, (hn, cn) = self.birnn(inputs_embeds)\n rep = hn[-1]\n logits, func_logits = self.classifier(rep)\n probs = torch.sigmoid(logits)\n func_probs = torch.softmax(func_logits, dim=-1)\n return probs, func_probs","repo_name":"optimatch/optimatch","sub_path":"baselines/statement_ICVH/icvh_model.py","file_name":"icvh_model.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"6518835290","text":"# 문제\n# 여행을 떠난 세준이는 지도를 하나 구하였다. 이 지도는 아래 그림과 같이 직사각형 모양이며 여러 칸으로 나뉘어져 있다. 한 칸은 한 지점을 나타내는데 각 칸에는 그 지점의 높이가 쓰여 있으며, 각 지점 사이의 이동은 지도에서 상하좌우 이웃한 곳끼리만 가능하다.\n\n\n\n# 현재 제일 왼쪽 위 칸이 나타내는 지점에 있는 세준이는 제일 오른쪽 아래 칸이 나타내는 지점으로 가려고 한다. 그런데 가능한 힘을 적게 들이고 싶어 항상 높이가 더 낮은 지점으로만 이동하여 목표 지점까지 가고자 한다. 위와 같은 지도에서는 다음과 같은 세 가지 경로가 가능하다.\n\n \n\n# 지도가 주어질 때 이와 같이 제일 왼쪽 위 지점에서 출발하여 제일 오른쪽 아래 지점까지 항상 내리막길로만 이동하는 경로의 개수를 구하는 프로그램을 작성하시오.\n\n# 입력\n# 첫째 줄에는 지도의 세로의 크기 M과 가로의 크기 N이 빈칸을 사이에 두고 주어진다. 이어 다음 M개 줄에 걸쳐 한 줄에 N개씩 위에서부터 차례로 각 지점의 높이가 빈 칸을 사이에 두고 주어진다. M과 N은 각각 500이하의 자연수이고, 각 지점의 높이는 10000이하의 자연수이다.\n\n# 출력\n# 첫째 줄에 이동 가능한 경로의 수 H를 출력한다. 
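# (English gloss of the Korean problem statement above, added for readers:
#  Baekjoon 1520. Given an M x N grid of heights (M, N <= 500, each height
#  <= 10000), start at the top-left cell and reach the bottom-right cell,
#  moving only up/down/left/right and only onto strictly lower cells.
#  The first output line is H, the number of such strictly-downhill paths;
#  the sentence continuing below guarantees H is a non-negative integer not
#  exceeding one billion.)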
모든 입력에 대하여 H는 10억 이하의 음이 아닌 정수이다.\nimport sys\nsys.setrecursionlimit(10**6)\ndef sol(m, n, table):\n def dfs(loc, goal, table, dp):\n x, y = loc\n if loc == goal:\n return dp[x][y]\n if dp[x][y] != -1:\n return dp[x][y]\n \n for d in [(1,0),(-1,0),(0,1),(0,-1)]:\n nextloc = (x+d[0], y+d[1])\n nextx, nexty = nextloc\n if 0 <= nextloc[0] < len(dp) and 0 <= nextloc[1] < len(dp[0]) and table[nextx][nexty] < table[x][y]:\n dp[x][y] += dfs(nextloc, goal, table, dp)\n dp[x][y] += 1\n return dp[x][y]\n \n \n goal = (m-1, n-1)\n dp = [[-1 for i in range(n)] for i in range(m)]\n dp[goal[0]][goal[1]] = 1\n ans = dfs((0, 0), goal, table, dp)\n \n return ans\n \nargs = []\nwhile True:\n try:\n args += [input()]\n except:\n break\nm, n = [int(e) for e in args[0].split(\" \")]\ntable = [[int(e) for e in line.split(\" \")] for line in args[1:]]\n\nprint(sol(m, n, table))","repo_name":"izjoker/Algorithm-Problems","sub_path":"baekjoon/1520/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9255869764","text":"\nfrom tests.util.asserts import assert_that, equals, fail\nfrom tests.util.test import test\nfrom tests.util.system_test import system_test\nfrom tests.util.all_examples import all_examples\n\nfrom pycell.lexer import lex\n\n# --- Utils ---\n\n\ndef lexed(inp):\n return list(lex(inp))\n\n# --- Lexing ---\n\n\n@test\ndef Empty_file_produces_nothing():\n assert_that(lexed(\"\"), equals([]))\n\n\n@test\ndef Open_bracket_produces_open_bracket_token():\n assert_that(lexed(\"(\"), equals([(\"(\", \"\")]))\n\n\n@test\ndef Close_bracket_produces_close_bracket_token():\n assert_that(lexed(\")\"), equals([(\")\", \"\")]))\n\n\n@test\ndef Open_brace_produces_open_brace_token():\n assert_that(lexed(\"{\"), equals([(\"{\", \"\")]))\n\n\n@test\ndef Close_brace_produces_close_brace_token():\n assert_that(lexed(\"}\"), equals([(\"}\", \"\")]))\n\n\n@test\ndef Multiple_brackets_become_multiple_tokens():\n assert_that(\n lexed(\"()\"),\n equals([(\"(\", \"\"), (\")\", \"\")])\n )\n\n\n@test\ndef Single_letter_becomes_a_symbol_token():\n assert_that(lexed(\"a\"), equals([(\"symbol\", \"a\")]))\n\n\n@test\ndef Multiple_letters_become_a_symbol_token():\n assert_that(lexed(\"foo\"), equals([(\"symbol\", \"foo\")]))\n\n\n@test\ndef A_symbol_followed_by_a_bracket_becomes_two_tokens():\n assert_that(\n lexed(\"foo(\"),\n equals([(\"symbol\", \"foo\"), (\"(\", \"\")])\n )\n\n\n@test\ndef Items_separated_by_spaces_become_separate_tokens():\n assert_that(\n lexed(\"foo bar ( \"),\n equals(\n [\n (\"symbol\", \"foo\"),\n (\"symbol\", \"bar\"),\n (\"(\", \"\")\n ]\n )\n )\n\n\n@test\ndef Items_separated_by_newlines_become_separate_tokens():\n assert_that(\n lexed(\"foo\\nbar\"),\n equals(\n [\n (\"symbol\", \"foo\"),\n (\"symbol\", \"bar\")\n ]\n )\n )\n\n\n@test\ndef Symbols_may_contain_numbers_and_underscores():\n assert_that(\n lexed(\"foo2_bar ( \"),\n equals(\n [\n (\"symbol\", \"foo2_bar\"),\n (\"(\", \"\")\n ]\n )\n )\n\n\n@test\ndef Symbols_may_start_with_underscores():\n assert_that(\n lexed(\"_foo2_bar ( \"),\n equals(\n [\n (\"symbol\", \"_foo2_bar\"),\n (\"(\", \"\")\n ]\n )\n )\n\n\n@test\ndef Integers_are_parsed_into_number_tokens():\n assert_that(lexed(\"128\"), equals([(\"number\", \"128\")]))\n\n\n@test\ndef Floating_points_are_parsed_into_number_tokens():\n assert_that(lexed(\"12.8\"), equals([(\"number\", \"12.8\")]))\n\n\n@test\ndef Leading_decimal_point_produces_number_token():\n 
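# (Aside on this test file, added for orientation: @test and @system_test are
#  this repo's own registration decorators (see tests.util.test), not pytest;
#  each test name reads as a sentence, and the lexed()/assert_that()/equals()
#  helpers keep every assertion in one fluent shape, e.g.
#      assert_that(lexed("12"), equals([("number", "12")])).)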
assert_that(lexed(\".812\"), equals([(\"number\", \".812\")]))\n\n\n@test\ndef Double_quoted_values_produce_string_tokens():\n assert_that(lexed('\"foo\"'), equals([(\"string\", 'foo')]))\n\n\n@test\ndef Single_quoted_values_produce_string_tokens():\n assert_that(lexed(\"'foo'\"), equals([(\"string\", 'foo')]))\n\n\n@test\ndef Different_quote_types_allow_the_other_type_inside():\n assert_that(lexed(\"'f\\\"oo'\"), equals([(\"string\", 'f\"oo')]))\n assert_that(lexed('\"f\\'oo\"'), equals([(\"string\", \"f'oo\")]))\n\n\n@test\ndef Empty_quotes_produce_an_empty_string_token():\n assert_that(lexed('\"\"'), equals([(\"string\", '')]))\n\n\n@test\ndef An_unfinished_string_is_an_error():\n try:\n lexed('\"foo')\n fail(\"Should throw\")\n except Exception as e:\n assert_that(str(e), equals(\"A string ran off the end of the program.\"))\n\n\n@test\ndef Commas_produce_comma_tokens():\n assert_that(lexed(\",\"), equals([(\",\", \"\")]))\n\n\n@test\ndef Equals_produces_an_equals_token():\n assert_that(lexed(\"=\"), equals([(\"=\", \"\")]))\n\n\n@test\ndef Semicolons_produce_semicolon_tokens():\n assert_that(lexed(\";\"), equals([(\";\", \"\")]))\n\n\n@test\ndef Colons_produce_colon_tokens():\n assert_that(lexed(\":\"), equals([(\":\", \"\")]))\n\n\n@test\ndef Arithmetic_operators_produce_operation_tokens():\n assert_that(lexed(\"+\"), equals([(\"operation\", \"+\")]))\n assert_that(lexed(\"-\"), equals([(\"operation\", \"-\")]))\n assert_that(lexed(\"*\"), equals([(\"operation\", \"*\")]))\n assert_that(lexed(\"/\"), equals([(\"operation\", \"/\")]))\n\n\n@test\ndef Multiple_token_types_can_be_combined():\n assert_that(\n lexed('frobnicate( \"Hello\" + name, 4 / 5.0);'),\n equals(\n [\n (\"symbol\", \"frobnicate\"),\n (\"(\", \"\"),\n (\"string\", \"Hello\"),\n (\"operation\", \"+\"),\n (\"symbol\", \"name\"),\n (\",\", \"\"),\n (\"number\", \"4\"),\n (\"operation\", \"/\"),\n (\"number\", \"5.0\"),\n (\")\", \"\"),\n (\";\", \"\")\n ]\n )\n )\n\n\n@test\ndef A_complex_example_program_lexes():\n example = \"\"\"\n double =\n {:(x)\n 2 * x;\n };\n\n num1 = 3;\n num2 = double( num );\n\n answer =\n if( greater_than( num2, 5 ),\n {\"LARGE!\"},\n {\"small.\"}\n );\n\n print( answer );\n \"\"\"\n lexed(example)\n\n\n@test\ndef Tabs_are_an_error():\n try:\n lexed(\"aaa\\tbbb\")\n fail(\"Should throw\")\n except Exception as e:\n assert_that(str(e), equals(\"Tab characters are not allowed in Cell.\"))\n\n\n# --- Example programs ---\n\n\n@system_test\ndef All_examples_lex():\n from pycell.chars_in_file import chars_in_file\n for example in all_examples():\n with open(example, encoding=\"ascii\") as f:\n lexed(chars_in_file(f))\n","repo_name":"andybalaam/cell","sub_path":"tests/lexer_tests.py","file_name":"lexer_tests.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"81"} +{"seq_id":"32919135509","text":"import cv2\nimport numpy as np\nimport dlib\nfrom math import hypot\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\n#--------------------------------------------------------------------------------------------------------------------\ndef midpoint(p1 ,p2):\n return int((p1.x + p2.x)/2), int((p1.y + p2.y)/2)\n\nfont = 
cv2.FONT_HERSHEY_PLAIN\n\n#--------------------------------------------------------------------------------------------------------------------\n\ndef detect_faces(img, cascade):\n    print (\"start 3\")\n    gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    coords = cascade.detectMultiScale(gray_frame, 1.3, 5)\n    if len(coords) > 1:\n        biggest = (0, 0, 0, 0)\n        for i in coords:\n            if i[3] > biggest[3]:\n                biggest = i\n        # wrap the largest detection found by the loop (using `i` here would\n        # always take the last detection instead of the biggest one)\n        biggest = np.array([biggest], np.int32)\n    elif len(coords) == 1:\n        biggest = coords\n    else:\n        return None\n    for (x, y, w, h) in biggest:\n        frame = img[y:y + h, x:x + w]\n    return frame\n\n#--------------------------------------------------------------------------------------------------------------------\n\ndef get_blinking_ratio(eye_points, facial_landmarks):\n    left_point = (facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)\n    right_point = (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)\n    center_top = midpoint(facial_landmarks.part(eye_points[1]), facial_landmarks.part(eye_points[2]))\n    center_bottom = midpoint(facial_landmarks.part(eye_points[5]), facial_landmarks.part(eye_points[4]))\n\n    #hor_line = cv2.line(frame, left_point, right_point, (0, 255, 0), 2)\n    #ver_line = cv2.line(frame, center_top, center_bottom, (0, 255, 0), 2)\n\n    hor_line_length = hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))\n    ver_line_length = hypot((center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))\n\n    ratio = hor_line_length / ver_line_length\n    return ratio\n\n#--------------------------------------------------------------------------------------------------------------------\n\ndef get_gaze_ratio(img, gray, eye_points, facial_landmarks):\n    left_eye_region = np.array([(facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y),\n                                (facial_landmarks.part(eye_points[1]).x, facial_landmarks.part(eye_points[1]).y),\n                                (facial_landmarks.part(eye_points[2]).x, facial_landmarks.part(eye_points[2]).y),\n                                (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y),\n                                (facial_landmarks.part(eye_points[4]).x, facial_landmarks.part(eye_points[4]).y),\n                                (facial_landmarks.part(eye_points[5]).x, facial_landmarks.part(eye_points[5]).y)], np.int32)\n    # cv2.polylines(frame, [left_eye_region], True, (0, 0, 255), 2)\n\n    height, width, _ = img.shape\n    mask = np.zeros((height, width), np.uint8)\n    cv2.polylines(mask, [left_eye_region], True, 255, 2)\n    cv2.fillPoly(mask, [left_eye_region], 255)\n    eye = cv2.bitwise_and(gray, gray, mask=mask)\n    cv2.imshow('eye', eye)\n\n    # crop the masked eye, threshold it, and compare the white pixel counts of\n    # the left and right halves to estimate gaze direction; without this the\n    # function returned None and main() crashed when averaging the two eyes\n    min_x = np.min(left_eye_region[:, 0])\n    max_x = np.max(left_eye_region[:, 0])\n    min_y = np.min(left_eye_region[:, 1])\n    max_y = np.max(left_eye_region[:, 1])\n\n    gray_eye = eye[min_y: max_y, min_x: max_x]\n    _, threshold_eye = cv2.threshold(gray_eye, 70, 255, cv2.THRESH_BINARY)\n    height, width = threshold_eye.shape\n    left_side_threshold = threshold_eye[0: height, 0: int(width / 2)]\n    left_side_white = cv2.countNonZero(left_side_threshold)\n\n    right_side_threshold = threshold_eye[0: height, int(width / 2): width]\n    right_side_white = cv2.countNonZero(right_side_threshold)\n\n    if left_side_white == 0:\n        gaze_ratio = 1\n    elif right_side_white == 0:\n        gaze_ratio = 5\n    else:\n        gaze_ratio = left_side_white / right_side_white\n    return gaze_ratio\n\n\ndef main():\n    print ('start')\n    img = cv2.imread('elon.jpg')\n    new_frame = np.zeros((500, 500, 3), np.uint8)\n    print('start2')\n    faces = 
detect_faces(img, face_cascade)\n # if faces is not None:\n # landmarks = predictor(gray, face)\n gray = cv2.cvtColor(faces, cv2.COLOR_BGR2GRAY)\n faces_i = detector(gray)\n for face in faces_i:\n landmarks = predictor(gray, face)\n left_eye_ratio = get_blinking_ratio([36, 37, 38, 39, 40, 41], landmarks)\n right_eye_ratio = get_blinking_ratio([42, 43, 44, 45, 46, 47], landmarks)\n blinking_ratio = (left_eye_ratio + right_eye_ratio) / 2\n\n print(blinking_ratio)\n\n # if blinking_ratio > 5.7:\n # cv2.putText(img, \"BLINKING\", (50, 150), font, 7, (255, 0, 0))\n print (\"in\")\n # Gaze detection\n gaze_ratio_left_eye = get_gaze_ratio(img,gray, [36, 37, 38, 39, 40, 41], landmarks)\n gaze_ratio_right_eye = get_gaze_ratio(img,gray, [42, 43, 44, 45, 46, 47], landmarks)\n gaze_ratio = (gaze_ratio_right_eye + gaze_ratio_left_eye) / 2\n print (gaze_ratio)\n\n if gaze_ratio <= 1:\n cv2.putText(img, \"RIGHT\", (50, 100), font, 2, (0, 0, 255), 3)\n\n elif 1 < gaze_ratio < 1.7:\n cv2.putText(img, \"CENTER\", (50, 100), font, 2, (0, 0, 255), 3)\n new_frame[:] = (0, 0, 255)\n else:\n new_frame[:] = (255, 0, 0)\n cv2.putText(img, \"LEFT\", (50, 100), font, 2, (0, 0, 255), 3)\n\n\n # cv2.imshow('gray_eye', gray_eye)\n # cv2.imshow('threshold', threshold)\n # cv2.imshow('eye', eye)\n cv2.imshow(\"faces\", faces)\n #cv2.imshow(\"new_frame\", new_frame)\n cv2.imshow(\"img\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n main()","repo_name":"Bhagu12/Gaze-estimation-in-a-group-image","sub_path":"detect_gaze.py","file_name":"detect_gaze.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2762021641","text":"from fuzzywuzzy import fuzz\r\nfrom fuzzywuzzy import process\r\nimport json\r\nimport requests\r\n\r\n\r\nEN = 0\r\nROMAJI = 1\r\nKANJI = 2\r\nBASE_URL = r'https://fsc9cff890.execute-api.us-east-1.amazonaws.com/test/songs/'\r\n\r\nsongs = None\r\ntitles = None\r\nsong_info = dict()\r\n\r\n\r\ndef update_cache():\r\n global songs\r\n global titles\r\n\r\n response = requests.get(BASE_URL)\r\n if response.status_code == requests.codes.ok:\r\n songs = response.json()\r\n titles = dict()\r\n\r\n for index, song in enumerate(songs):\r\n titles[song['title']['romaji']] = index\r\n\r\n\r\ndef update_song_info_cache(title):\r\n global song_info\r\n\r\n response = requests.get(BASE_URL, {'title': title})\r\n if response.status_code == requests.codes.ok:\r\n song_info[title] = response.json()\r\n\r\n\r\ndef get_songs(artist=None):\r\n if songs is None:\r\n update_cache()\r\n\r\n if artist is not None:\r\n artist = artist.lower()\r\n songs = [song for song in songs if song['artist'].lower() == artist]\r\n\r\n return songs\r\n\r\n\r\ndef get_artists():\r\n if songs is None:\r\n update_cache()\r\n\r\n artists = {song['artist'] for song in songs}\r\n return artists\r\n\r\n\r\ndef get_lyrics(title, lang=EN):\r\n if titles is None:\r\n update_cache()\r\n\r\n cached_titles = list(titles.keys())\r\n result = process.extractOne(title, cached_titles, score_cutoff=75)\r\n \r\n if result is None:\r\n update_cache()\r\n cached_titles = titles.keys()\r\n result = process.extractOne(title, cached_titles, score_cutoff=75)\r\n if result is None:\r\n return None\r\n\r\n title = result[0]\r\n id = titles[title]\r\n song = songs[id]\r\n arg = song['str_id']\r\n\r\n if arg not in song_info:\r\n update_song_info_cache(arg)\r\n\r\n song_data = song_info[arg]\r\n \r\n if lang == EN:\r\n return 
song_data['lyrics']['en']\r\n elif lang == ROMAJI:\r\n return song_data['lyrics']['romaji']\r\n else:\r\n return None","repo_name":"ahuei123456/Ruby-Bot","sub_path":"rubybot/utils/llparser.py","file_name":"llparser.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"5971129598","text":"import pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--daemon\",\n action=\"store\",\n type=str,\n default=\"p2pd\",\n choices=(\"p2pd\", \"jsp2pd\"),\n help=\"Select the P2P daemon to use (default: p2pd).\",\n )\n\n\n@pytest.fixture\ndef daemon_executable(request):\n return request.config.getoption(\"--daemon\")\n\n\ndef pytest_collection_modifyitems(config, items):\n # No tests skipped when using the Go daemon at the moment\n if config.getoption(\"--daemon\") == \"p2pd\":\n return\n\n skip_reasons = {\n \"go_only\": \"this feature is not supported by jsp2pd.\",\n \"jsp2pd_probable_bug\": \"this test fails with jsp2pd, could be because of a bug.\",\n }\n for item in items:\n for mark, reason in skip_reasons.items():\n if mark in item.keywords:\n item.add_marker(pytest.mark.skip(reason=reason))\n","repo_name":"mhchia/py-libp2p-daemon-bindings","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"33037280218","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isBalanced(self, root: Optional[TreeNode]) -> bool:\n if not root:\n return True\n if abs(self.depth(root.left)-abs(self.depth(root.right)))>1:\n return False\n return self.isBalanced(root.left) and self.isBalanced(root.right)\n \n \n \n \n def depth(self,root):\n maxdepth=0\n if not root:\n return 0\n stack=[[root,1]]\n while stack:\n node,d=stack.pop()\n if node:\n maxdepth=max(maxdepth,d)\n stack.append([node.left,d+1])\n stack.append([node.right,d+1])\n \n return maxdepth\n \n \n \n ","repo_name":"niksintelli4ever/LeetCode-Journey","sub_path":"0110-balanced-binary-tree/0110-balanced-binary-tree.py","file_name":"0110-balanced-binary-tree.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73301236426","text":"import numpy as np \nimport argparse\nfrom scipy.linalg import block_diag\nimport pandas as pd\nimport sys\nfrom utils import *\nfrom iterate_update import *\nfrom pre_process import *\n\n# def iSphereMAP(X,Y,grp_info,estPi,nlambda,sparse_method = \"Top_one\",k = 3):\ndef main():\n # parse command line arguments\n parser = argparse.ArgumentParser(description = 'Map ICD code embeddings in two institutions into a shared space')\n parser.add_argument('src_input', help = 'the input source embeddings')\n parser.add_argument('trg_input', help = 'the input target embeddings')\n parser.add_argument('group_information', help = 'the group_information for source input')\n parser.add_argument('estPi', choices = ['OLS', 'cosine','spherical','lasso'],default = 'cosine', help = 'Methods to estimate Pi')\n parser.add_argument('nlambda', type = int, default = 5, help = 'evenly break [1e-5, 1-1e-5] into intervals')\n parser.add_argument('sparse_method', choices = ['Top_one', 'hard_threshold','Top_k'], default = 'cosine', help = 'Methods to 
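# (Aside on the llparser record above, added because get_songs() as written
#  raises UnboundLocalError: assigning to `songs` inside the function makes
#  the name local, so the initial `if songs is None` reads an unbound name.
#  A fixed sketch of the same logic:
#      def get_songs(artist=None):
#          if songs is None:
#              update_cache()
#          matched = songs
#          if artist is not None:
#              wanted = artist.lower()
#              matched = [s for s in songs if s['artist'].lower() == wanted]
#          return matched
#  Filtering into a new local name keeps `songs` a plain global read.)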
sparse Pi')\n parser.add_argument('Beta_output', help = 'the estimated beta')\n parser.add_argument('Pi_output', help = 'the estimated pi')\n parser.add_argument('--k', type = int, default = 3, help = 'k for Top_k method in sparse_Pi')\n parser.add_argument('--seed', type = int, default = 0, help = 'the random seed (defaults to 0)')\n args = parser.parse_args()\n \n np.random.seed(args.seed)\n\n # read files\n # srcfile = open(args.src_input, encoding = 'utf-8', errors='surrogateescape')\n # trgfile = open(args.trg_input, encoding = 'utf-8', errors='surrogateescape')\n # grpfile = open(args.group_information, encoding = 'utf-8', errors='surrogateescape')\n X = read_mat_file(args.src_input)\n Y = read_mat_file(args.trg_input) \n grp_info = read_mat_file(args.group_information) \n\n\n\n estPi = args.estPi\n nlambda = args.nlambda\n sparse_method = args.sparse_method\n \n # check dimension match\n if X.shape != Y.shape:\n print('The dimensions of X and Y do not match!')\n sys.exit(-1)\n\n if X.shape == Y.shape:\n N,p = X.shape\n\n # W estimation\n Beta = gradient_update_nogrp(X,Y,alpha = 1,convergence = 1e-10)\n Yhat = X.dot(Beta)\n\n # Lambda searching\n cv_rslt = find_lambda_cv(estPi,p,N,Y,Yhat,nlambda,grp_info,sparse_method,args.k)\n cv_err = cv_rslt[\"cv_err\"]\n lambda_all = cv_rslt[\"lambda_all\"]\n lambda_cv = lambda_all[np.argmin(cv_err)]\n\n # Pi estimation\n Pi = fitpi_CV(estPi,Y,Yhat,lambda_cv,grp_info,sparse_method,args.k)\n ind = np.asarray(np.where(np.amax(norm_l2(Pi),axis = 0) == 1)).flatten()\n X_match = Pi.dot(X)[ind,:]\n Y_match = Y[ind,:]\n index_matched = grp_info[ind]\n ugrp_matched = np.unique(index_matched)\n\n # Using matched data to estimate W again\n Beta_update = gradient_update_nogrp(X_match,Y_match,alpha=1,convergence=1e-10)\n\n results = {'beta' :Beta_update,'pi':Pi}\n\n # Write mapped embeddings\n # Betafile = open(args.Beta_output, mode='w', encoding=args.encoding, errors='surrogateescape')\n # Pifile = open(args.Pi_output, mode='w', encoding=args.encoding, errors='surrogateescape')\n np.savetxt(args.Beta_output,Beta_update)\n np.savetxt(args.Pi_output,Pi)\n # Betafile.close()\n # Pifile.close()\n\n\nif __name__ == '__main__':\n main()","repo_name":"yufengzhang1995/iSphereMAP","sub_path":"iSphereMAP/iSphereMAP.py","file_name":"iSphereMAP.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29330465596","text":"class Matrix:\n def __init__(self, rows, cols):\n self.rows = int(rows) #кол-во строк в матрице\n self.cols = int(cols) #кол-во столбцов в матрице\n self.matrix = [[0] * cols for _ in range(rows)]\n\n def set_element(self, row, col, value):\n self.row = row\n self.col = col\n self.value = int(value)\n self.matrix[row][col] = self.value\n print (self.matrix)\n \n def get_element(self, row, col):\n self.row = row\n self.col = col\n return self.matrix[row][col]\n\n def get_rows(self):\n return self.rows\n \n \n def get_cols(self):\n return self.cols\n\nmy_rows = int(input('Введите, сколько должно быть строк в матрице:'))\nmy_cols = int(input('Введите, сколько должно быть столбцов в матрице:'))\nd = Matrix(my_rows,my_cols)\nmy_row_index = int(input('Введите индекс строки, куда вставить элемент:'))\nmy_col_index = int(input('Введите индекс столбца, куда вставит элемент:'))\nmy_value_index = int(input('Введите элемент, который хотите вставить в матрицу:'))\nd.set_element(my_row_index,my_col_index,my_value_index)\nmy_row_check = int(input('Введите индекс 
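# (Aside on the iSphereMAP record above, added for clarity: the matched-index
#  trick `np.where(np.amax(norm_l2(Pi), axis=0) == 1)` keeps only entries of
#  the estimated matching matrix Pi that are exact one-hot matches after
#  normalisation. A tiny self-contained sketch, assuming norm_l2 is row-wise
#  L2 normalisation:
#      import numpy as np
#      Pi = np.array([[1.0, 0.0], [0.4, 0.6]])
#      rows = Pi / np.linalg.norm(Pi, axis=1, keepdims=True)
#      matched = np.where(np.amax(rows, axis=0) == 1)[0]   # -> array([0])
#  Only confidently matched codes survive, and the script then refits W on
#  exactly those rows.)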
строки, чтобы узнать какой по нему стоит элемент:'))\nmy_col_check = int(input('Введите индекс столбца, чтобы узнать какой по нему стоит элемент:'))\nprint (f'Под индексом строки: {my_row_check} и индексом столбца: {my_col_check} находится элемент: {d.get_element(my_row_check,my_col_check)}')\nprint (f'В матрице {d.get_rows()} строк') \nprint (f'В матрице {d.get_cols()} столбцов')","repo_name":"KarinaHolubeva/Python.Karyna","sub_path":"Homework_15/zadacha_1.py","file_name":"zadacha_1.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18198078055","text":"from contextlib import ExitStack\nimport logging\nimport threading\n\nfrom pydbus import SessionBus # type: ignore\nfrom pydbus.generic import signal # type: ignore\n\nfrom pladder.dbus import PLADDER_CONNECTOR_XML, RetryProxy, dbus_loop\nfrom pladder.irc.client import Hook\n\n\nRELOAD_TIMEOUT_SECONDS = 5*60\n\n\nlogger = logging.getLogger(\"pladder.irc\")\n\n\nclass DbusHook(Hook, ExitStack):\n def __init__(self, config, client):\n super().__init__()\n self.config = config\n bus = SessionBus()\n self.bot = RetryProxy(bus, \"se.raek.PladderBot\")\n self.connector = PladderConnector(bus, config, client)\n self.enter_context(dbus_loop())\n self.connector.ReloadComplete()\n\n def on_trigger(self, timestamp, channel, sender, text):\n return self.bot.RunCommand(timestamp, self.config.network, channel, sender.nick, text,\n on_error=self._handle_bot_error)\n\n def _handle_bot_error(self, e):\n if \"org.freedesktop.DBus.Error.ServiceUnknown\" in str(e):\n return {\n \"text\": \"Internal error: could not reach pladder-bot. \" +\n \"Please check the log: \\\"journalctl --user-unit pladder-bot.service -e\\\"\",\n \"command\": \"error\",\n }\n else:\n logger.error(str(e))\n return {\n \"text\": \"Internal error: \" + str(e),\n \"command\": \"error\",\n }\n\n\nclass PladderConnector:\n # Note: methods of this class are called in the separate GLib main\n # loop thread.\n\n dbus = PLADDER_CONNECTOR_XML\n\n def __init__(self, bus, config, client):\n self.client = client\n self.config = config\n bus.publish(f\"se.raek.PladderConnector.{config.network}\", self)\n\n def GetConfig(self):\n return {\n \"network\": self.config.network,\n \"host\": self.config.host,\n \"port\": str(self.config.port),\n \"nick\": self.config.nick,\n \"realname\": self.config.realname,\n \"trigger_prefix\": self.config.trigger_prefix,\n \"reply_prefix\": self.config.reply_prefix,\n }\n\n def SendMessage(self, channel, text):\n if not channel.startswith(\"#\"):\n return f\"Invalid channel name: {channel}\"\n elif channel not in self.client.get_channels():\n return f\"Not joined to channel: {channel}\"\n else:\n self.client.send_message(channel, text)\n return \"Message sent.\"\n\n def GetChannels(self):\n return self.client.get_channels()\n\n def GetChannelUsers(self, channel):\n return self.client.get_channel_users(channel)\n\n def TriggerReload(self):\n self.client.trigger_detach()\n return True\n\n ReloadComplete = signal()\n\n\ndef send_reload_trigger(config):\n logger.info(f\"Sending reload trigger to connector for {config.network}...\")\n reload_complete = threading.Event()\n bus = SessionBus()\n try:\n client = bus.get(f\"se.raek.PladderConnector.{config.network}\")\n except Exception:\n logger.error(\"Coult not reach connector!\")\n return False\n with dbus_loop():\n client.ReloadComplete.connect(reload_complete.set)\n if not client.TriggerReload():\n 
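# (Aside on DbusHook above, added for clarity: RunCommand is invoked through
#  RetryProxy with an on_error= callback, so a dead pladder-bot service turns
#  into an in-channel error message instead of an unhandled exception. The
#  generic shape of that pattern, sketched independently of pydbus:
#      def call_with_fallback(fn, on_error, *args, **kwargs):
#          try:
#              return fn(*args, **kwargs)
#          except Exception as e:
#              return on_error(e)
#  with _handle_bot_error playing the role of on_error here.)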
logger.error(\"Reload not supported!\")\n return False\n logger.info(\"Reload triggered successfully.\")\n logger.info(\"Wating for connector to complete reload...\")\n if not reload_complete.wait(timeout=RELOAD_TIMEOUT_SECONDS):\n logger.error(\"Reload did not complete in {RELOAD_TIMEOUT_SECONDS} seconds!\")\n return False\n logger.info(\"Reload completed.\")\n return True\n","repo_name":"raek/pladder","sub_path":"pladder/irc/dbus.py","file_name":"dbus.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"42261639833","text":"import json\r\nimport os\r\nfrom fp2a_keywordsProcessing import keywordsWrite\r\nfrom fp2a_qe2qeJson import isNonStandardSectionTitle\r\nfrom fp2a_numericalOrbitalManager import findNAOByCutoff\r\nfrom fp2a_jsonRotation import jsonRotate\r\n\r\ndef discrepantKeywordsConversion(keyword, value):\r\n # This function can only complete single line conversion\r\n # for more complicated version, program in generateABACUSFiles function :(\r\n if keyword == 'calculation':\r\n optionListDirect = ['scf', 'relax', 'nscf', 'md']\r\n if optionListDirect.count(value) > 0:\r\n return value\r\n elif value == 'vc-relax':\r\n return 'cell-relax' \r\n else:\r\n print('|-Conversion warning: ' + value + ' is not a valid value for keyword ' + keyword + ', using default value')\r\n return 'scf'\r\n if keyword == 'ibrav':\r\n ibravReturnDict = {\r\n \"0\": \"none\", \"1\": \"cubic\", \"2\": \"fcc\", \"3\": \"bcc\", \"4\": \"hexagonal\", \"5\": \"triangoal\", \"6\": \"st\",\r\n \"7\": \"bct\", \"8\": \"so\", \"9\": \"baco\", \"10\": \"fco\", \"11\": \"bco\", \"12\": \"sm\", \"13\": \"bacm\", \"14\": \"triclinic\"\r\n }\r\n if str(value) in ibravReturnDict:\r\n return ibravReturnDict[str(value)]\r\n else:\r\n raise ValueError('*ERROR: ibrav = ' + str(value) + ' is not supported in ABACUS')\r\n else:\r\n return value\r\n\r\ndef discrepantModuleConversion(qeDict, module = 'smearing'):\r\n\r\n # in this function I package up conversion of set of keywords controlling the same module\r\n # for example, smearing_method (ABACUS) is controlled by occupations and smearing (QE)\r\n\r\n if module == 'smearing':\r\n print('|-Runtime information: keyword \\'occupations\\' is needed to specially treated:')\r\n if qeDict[\"system\"][\"occupations\"] == 'smearing':\r\n print('|-Runtime information: keyword \\'occupations\\' is set to \\'smearing\\'')\r\n if 'smearing' in qeDict[\"system\"]:\r\n print('|-Runtime information: keyword \\'smearing\\' is set to \\'' \r\n + qeDict[\"system\"][\"smearing\"] + '\\'')\r\n optionListGaussian = ['gaussian', 'gauss']\r\n optionListFermiDirac = ['fermi-dirac', 'f-d', 'fd']\r\n optionListMethfesselPaxton = ['methfessel-paxton', 'm-p', 'mp']\r\n if optionListGaussian.count(qeDict[\"system\"][\"smearing\"]) > 0:\r\n return ['smearing_method'], ['gaussian']\r\n elif optionListFermiDirac.count(qeDict[\"system\"][\"smearing\"]) > 0:\r\n return ['smearing_method'], ['fermi-dirac']\r\n elif optionListMethfesselPaxton.count(qeDict[\"system\"][\"smearing\"]) > 0:\r\n return ['smearing_method'], ['methfessel-paxton']\r\n else:\r\n print('|-Runtime information: keyword \\'smearing\\' is not supported by ABACUS, set to default \\'gaussian\\'')\r\n return ['smearing_method'], ['gaussian']\r\n else:\r\n print('|-Runtime information: keyword \\'smearing\\' is not set, default is \\'gaussian\\'')\r\n return ['smearing_method'], ['gaussian']\r\n else:\r\n print('|-Runtime information: 
keyword \\'occupations\\' is not supported by ABACUS, set to default \\'fixed\\'')\r\n return ['smearing_method'], ['fixed']\r\n \r\n elif module == 'vdw':\r\n optionListD2 = ['grimme-d2', 'Grimme-D2', 'DFT-D', 'dft-d']\r\n optionListD3 = ['grimme-d3', 'Grimme-D3', 'DFT-D3', 'dft-d3']\r\n if optionListD2.count(qeDict[\"system\"][\"vdw_corr\"]) > 0:\r\n return ['vdw_method'], ['d2']\r\n elif optionListD3.count(qeDict[\"system\"][\"vdw_corr\"]) > 0:\r\n if 'dftd3_version' in qeDict[\"system\"]:\r\n if qeDict[\"system\"][\"dftd3_version\"] == 2:\r\n raise Exception('*ERROR: parameter conflict! you set vdw_corr to dft-d3 but dftd3_version to 2, which means dft-d2')\r\n elif qeDict[\"system\"][\"dftd3_version\"] == 3:\r\n return ['vdw_method'], ['d3_0']\r\n elif qeDict[\"system\"][\"dftd3_version\"] == 4:\r\n return ['vdw_method'], ['d3_bj']\r\n elif qeDict[\"system\"][\"dftd3_version\"] == 5:\r\n print('|-Conversion warning: DFTD3 version 5 (Grimme-D3M (zero damping)) is not implemented in abacus, will use version 3 instead')\r\n return ['vdw_method'], ['d3_0']\r\n elif qeDict[\"system\"][\"dftd3_version\"] == 6:\r\n print('|-Conversion warning: DFTD3 version 6 (Grimme-D3M (BJ damping)) is not implemented in abacus, will use version 4 instead')\r\n return ['vdw_method'], ['d3_bj']\r\n else:\r\n raise Exception('*ERROR: DFTD3 version is not valid')\r\n else:\r\n return ['vdw_method'], ['d3_0']\r\n else:\r\n return ['vdw_method'], ['none']\r\n \r\n elif module == 'lda_plus_u':\r\n # this module is only for qe < 7.1\r\n hubbardUList = []\r\n ntyp = qeDict[\"system\"][\"ntyp\"]\r\n # first scan if all hubbard parameters are corectly defined\r\n for idxType in range(ntyp):\r\n if f'Hubbard_U({idxType+1})' not in qeDict[\"system\"]:\r\n raise Exception(f'*ERROR: Hubbard_U for atom type-{idxType+1} is not set')\r\n for keyword in qeDict[\"system\"]:\r\n if keyword.startswith('starting_ns_eigenvalue'):\r\n print('|-Conversion warning: starting_ns_eigenvalue is not supported in ABACUS, will be ignored')\r\n for idxType in range(ntyp):\r\n hubbardU = qeDict[\"system\"][f'Hubbard_U({idxType+1})']\r\n hubbardUList.append(hubbardU)\r\n returnValue = ' '.join([str(x) for x in hubbardUList])\r\n return ['hubbard_u'], [returnValue]\r\n\r\n elif module == 'noncolin':\r\n ntyp = qeDict[\"system\"][\"ntyp\"]\r\n valueList = []\r\n keywordList = []\r\n for ityp in range(ntyp):\r\n angle12List = []\r\n if f'angle1({ityp+1})' in qeDict[\"system\"]:\r\n angle12List.append(qeDict[\"system\"][f'angle1({ityp+1})'])\r\n else:\r\n raise Exception(f'*ERROR: noncolinear parameter angle1 for atom type-{ityp+1} is not set')\r\n if f'angle2({ityp+1})' in qeDict[\"system\"]:\r\n angle12List.append(qeDict[\"system\"][f'angle2({ityp+1})'])\r\n else:\r\n raise Exception(f'*ERROR: noncolinear parameter angle2 for atom type-{ityp+1} is not set')\r\n keywordList.append(['angle1', 'angle2'])\r\n valueList.append(angle12List)\r\n\r\n return keywordList, valueList\r\n else:\r\n return [''], ['']\r\ndef hubbardManifoldToNumber(manifold):\r\n if manifold == 'unknown_manifold_from_old_qe_versions':\r\n print('|-Hubbard warning: For you are using Quantum ESPRESSO < 7.1, \\n'\r\n +'| no information for determining the angular momentum of the element, \\n'\r\n +'| therefore ASSUMING DFT+U DISABLED')\r\n return '-1'\r\n elif manifold[-1] == 's':\r\n return '0'\r\n elif manifold[-1] == 'p':\r\n return '1'\r\n elif manifold[-1] == 'd':\r\n return '2'\r\n elif manifold[-1] == 'f':\r\n return '3'\r\n elif manifold[-1] == 'g':\r\n return '4'\r\n 
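# (Aside, not part of the original function: the s/p/d/f/g/h elif ladder
#  around this point is a plain table lookup; an equivalent dict-based sketch
#  would be
#      L_OF_MANIFOLD = {'s': '0', 'p': '1', 'd': '2', 'f': '3', 'g': '4', 'h': '5'}
#      return L_OF_MANIFOLD.get(manifold[-1], '-1')
#  keeping the same convention that '-1' means no +U correction applies.)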
elif manifold[-1] == 'h':\r\n return '5'\r\n else:\r\n # are you serious you are using such a high angular momentum?\r\n return '-1'\r\n\r\ndef fromElementFindLinesInXYZ(element, elementListInXYZ):\r\n\r\n lines = []\r\n for i in range(len(elementListInXYZ)):\r\n if elementListInXYZ[i] == element:\r\n lines.append(i)\r\n return lines\r\n\r\ndef generateABACUSFiles(\r\n mode = 'on-the-fly',\r\n qeInputScriptJson = None,\r\n qeInputScriptJsonFile = 'qeInputScript.json', \r\n keywordConversionJson = None,\r\n keywordConversionJsonFile = 'keywordConversion.json', \r\n ABACUSInputFile = 'INPUT', \r\n ABACUSStructureFile = 'STRU', \r\n ABACUSKpointsFile = 'KPT',\r\n qeVersion_7_1 = True,\r\n overwriteKeywords = {},\r\n additionalKeywords = {\"numerical_orbitals\": {\"use_nao\": False}},\r\n boolUpdateInformation = False\r\n ):\r\n\r\n \"\"\"\r\n # generate ABACUS input files from QE input script in json format\\n\r\n @param mode: the mode controlling how to read dictionary data, available choices are on-the-fly and file\\n\r\n @param qeInputScriptJsonFile: QE input script in json format generated by qe2qeJson.py\\n\r\n @param keywordConversionJsonFile: json file containing the conversion table of QE keywords to ABACUS keywords,\r\n generated by jsonRotation.py\\n\r\n @param ABACUSInputFile: ABACUS input file name, default is INPUT\\n\r\n @param ABACUSStructureFile: ABACUS structure file name, default is STRU\\n\r\n @param ABACUSKpointsFile: ABACUS kpoints file name, default is KPT\\n\r\n @param qeVersion_7_1: whether the QE version is >= 7.1, default is True\\n\r\n @param overwriteKeywords: keywords to be overwritten, default is empty\\n\r\n @param additionalKeywords: additional keywords to be added to the ABACUS input file, default contains a\r\n dictionary named numerical orbitals\\n\r\n \"\"\"\r\n qeNonstandardSections = []\r\n discrepantModuleKeywordsExclude = [\r\n 'smearing', 'dftd3_version'\r\n ]\r\n if mode == 'on-the-fly':\r\n if qeInputScriptJson == None:\r\n qeInputScript = {}\r\n raise Exception('*ERROR: qeInputScriptJson is not provided')\r\n if keywordConversionJson == None:\r\n keywordConversion = {}\r\n raise Exception('*ERROR: keywordConversionJson is not provided')\r\n else:\r\n qeInputScript = qeInputScriptJson\r\n keywordConversion = keywordConversionJson\r\n\r\n elif mode == 'file':\r\n if os.path.isfile(qeInputScriptJsonFile) == False:\r\n raise Exception('*ERROR: qeInputScriptJsonFile does not exist')\r\n else:\r\n with open(qeInputScriptJsonFile, 'r') as f:\r\n qeInputScript = json.load(f)\r\n if os.path.isfile(keywordConversionJsonFile) == False:\r\n print('*WARNING: keywordConversionJsonFile does not exist, try to generate it...')\r\n if os.path.isfile('depuis_le_abacus.json'):\r\n print('|-Runtime information: depuis_le_abacus.json found, will use it to generate keywordConversion dictionary')\r\n keywordConversionJsonFile = jsonRotate(rotateSequence = 'qac', sourceDictionaryFileName='depuis_le_abacus.json')\r\n with open(keywordConversionJsonFile, 'r') as f:\r\n keywordConversion = json.load(f)\r\n else:\r\n raise Exception('*ERROR: keywordConversionJsonFile does not exist and depuis_le_abacus.json does not exist')\r\n else:\r\n with open(keywordConversionJsonFile, 'r') as f:\r\n keywordConversion = json.load(f)\r\n else:\r\n qeInputScript = {}\r\n keywordConversion = {}\r\n raise Exception('*ERROR: mode is not valid')\r\n\r\n# INPUT file write from here ======================================================================\r\n with open(ABACUSInputFile, 'w') as f:\r\n 
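# (Usage sketch for generateABACUSFiles, added for orientation and assuming
#  only what the docstring above states; the two JSON file names are the
#  documented defaults:
#      generateABACUSFiles(
#          mode='file',
#          qeInputScriptJsonFile='qeInputScript.json',
#          keywordConversionJsonFile='keywordConversion.json',
#      )
#  writes INPUT, STRU and KPT into the working directory, falling back to
#  rotating depuis_le_abacus.json when the conversion table is missing.)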
f.writelines('INPUT_PARAMETERS\\n')\r\n if additionalKeywords[\"numerical_orbitals\"][\"use_nao\"]:\r\n f.writelines('basis_type lcao\\n')\r\n print('|-Numerical orbitals: change solver to genelpa')\r\n qeInputScript[\"electrons\"][\"diagonalization\"] = \"genelpa\"\r\n else:\r\n f.writelines('basis_type pw\\n')\r\n if len(qeInputScript.keys()) == 0:\r\n raise Exception('*ERROR: QE input script is empty')\r\n\r\n for section in qeInputScript:\r\n\r\n if section == '__version__':\r\n if qeInputScript[section] == '>=7.1':\r\n qeVersion_7_1 = True\r\n else:\r\n qeVersion_7_1 = False\r\n elif isNonStandardSectionTitle(section):\r\n if section != 'CELL_PARAMETERS' and section != 'ATOMIC_POSITIONS' and section != 'K_POINTS' and section != 'ATOMIC_SPECIES':\r\n qeNonstandardSections.append(section)\r\n else:\r\n for keyword in qeInputScript[section]:\r\n if keyword in keywordConversion.keys():\r\n if boolUpdateInformation:\r\n print('|-Runtime information: original keyword: ' + keyword)\r\n print('|-Runtime information: converted keyword: ' + keywordConversion[keyword][0])\r\n print('|-Runtime information: pass its value: ' + str(qeInputScript[section][keyword]))\r\n convertedKeyword = keywordConversion[keyword][0]\r\n value = discrepantKeywordsConversion(keyword, qeInputScript[section][keyword])\r\n print('|-Runtime information: comment in depuis_le_abacus.json\\n| ' \r\n + ' '*21 + 'Keyword: ' + convertedKeyword + '\\n| ' + ' '*21 + 'Comment: ' + keywordConversion[keyword][-1])\r\n if convertedKeyword in overwriteKeywords.keys():\r\n print('|-Overwrite: will overwrite keyword ' + keyword \r\n + ' with value ' + keywordsWrite(overwriteKeywords[convertedKeyword]))\r\n value = overwriteKeywords[convertedKeyword]\r\n f.writelines(convertedKeyword + ' ' + keywordsWrite(value) + '\\n')\r\n # subscript 0 is the position of abacus keyword in the list of values\r\n else:\r\n # Discrapant module conversion ========================================================\r\n # relevant keywords are not included in conversion table\r\n if discrepantModuleKeywordsExclude.count(keyword) > 0:\r\n continue\r\n elif keyword == 'occupations':\r\n keywordToWrite, valueToWrite = discrepantModuleConversion(qeInputScript, module = 'smearing')\r\n for i in range(len(keywordToWrite)):\r\n f.writelines(keywordToWrite[i] + ' ' + keywordsWrite(valueToWrite[i]) + '\\n')\r\n elif keyword == 'vdw_corr':\r\n keywordToWrite, valueToWrite = discrepantModuleConversion(qeInputScript, module = 'vdw')\r\n for i in range(len(keywordToWrite)):\r\n f.writelines(keywordToWrite[i] + ' ' + keywordsWrite(valueToWrite[i]) + '\\n')\r\n elif keyword.startswith('Hubbard_U(') and qeVersion_7_1 == False:\r\n print('|-Conversion warning: Keyword ' + keyword + ' is deprecated in QE version >= 7.1')\r\n else:\r\n print('|-Conversion warning: Keyword ' + keyword + ' not found in conversion table')\r\n\r\n # proceeding non-standard section that unrevelant with STRU and KPT/KLINE files\r\n for section in qeNonstandardSections:\r\n if section == 'HUBBARD':\r\n # well I should say pw is not implemented with dft+u in abacus now...\r\n # maybe pw is not quite encouraged to be used in abacus? 
i don't know\r\n # but i set it here just in case\r\n if qeVersion_7_1:\r\n # because for qe >= 7.1, enabling dft+u calculation is from defining HUBBARD section rather than lda_plus_u\r\n # keyword, such keyword is deprecated in version >= 7.1\r\n f.writelines('dft_plus_u 1\\n')\r\n else:\r\n pass\r\n \r\n manifoldCorrection = []\r\n hubbardUValues = []\r\n for element in qeInputScript['ATOMIC_SPECIES']['elements']:\r\n # abacus also has not implement manifold-by-manifold scheme of dft+u, \r\n # so presently i just use the last one\r\n try:\r\n manifold = list(qeInputScript['HUBBARD']['on-site'][element].keys())[-1]\r\n manifoldCorrection.append(hubbardManifoldToNumber(manifold))\r\n hubbardUValues.append(\r\n keywordsWrite(\r\n qeInputScript['HUBBARD']['on-site'][element][manifold][\"U\"]\r\n )\r\n )\r\n except KeyError:\r\n manifoldCorrection.append('-1')\r\n hubbardUValues.append('0.0')\r\n orbital_corr = ' '.join(manifoldCorrection)\r\n f.writelines('orbital_corr ' + orbital_corr + '\\n')\r\n hubbard_u = ' '.join(hubbardUValues)\r\n f.writelines('hubbard_u ' + hubbard_u + '\\n')\r\n\r\n elif section == 'SOLVANTS':\r\n print('|-Conversion warning: a direct conversion from QE-RISM to ABACUS-Implicit solvation model is risky. You should really know what you are doing.')\r\n if qeInputScript[\"system\"][\"trism\"]:\r\n print('*Warning: conversion of solvation from QE to ABACUS is discouraged.')\r\n pass\r\n elif section == 'OCCUPATIONS':\r\n pass\r\n for keyword in additionalKeywords:\r\n if keyword != 'numerical_orbitals':\r\n f.writelines(keyword + ' ' + keywordsWrite(additionalKeywords[keyword]) + '\\n')\r\n else:\r\n if additionalKeywords[keyword][\"use_nao\"]:\r\n f.writelines('#numerical_orbitals are added\\n')\r\n# STRU file write from here =======================================================================\r\n with open(ABACUSStructureFile, 'w') as f:\r\n f.writelines('ATOMIC_SPECIES\\n')\r\n for ityp in range(qeInputScript['system']['ntyp']):\r\n f.writelines(\r\n qeInputScript['ATOMIC_SPECIES']['elements'][ityp] \r\n + ' ' \r\n + keywordsWrite(qeInputScript['ATOMIC_SPECIES']['masses'][ityp])\r\n + ' ' \r\n + keywordsWrite(qeInputScript['ATOMIC_SPECIES']['pseudopotentials'][ityp])\r\n + '\\n')\r\n # add numerical orbitals\r\n if additionalKeywords[\"numerical_orbitals\"][\"use_nao\"]:\r\n f.writelines('\\nNUMERICAL_ORBITAL\\n')\r\n if len(additionalKeywords[\"numerical_orbitals\"][\"atom_species\"]) == 0:\r\n print(\"|-Numerical orbitals: no atom species specified in fp2a.inp, will use atom species from ATOMIC_SPECIES\")\r\n atomSpecies = qeInputScript['ATOMIC_SPECIES']['elements']\r\n else:\r\n atomSpecies = additionalKeywords[\"numerical_orbitals\"][\"atom_species\"]\r\n\r\n naoSelectMode = additionalKeywords[\"numerical_orbitals\"][\"select_nao\"]\r\n print(\"|-Numerical orbitals: select_nao mode: \" + naoSelectMode)\r\n for idxSpecies in range(len(atomSpecies)):\r\n if naoSelectMode == 'radius_min' or naoSelectMode == 'radius_max':\r\n threshold = additionalKeywords[\"numerical_orbitals\"][\"cutoff_list_radius\"][idxSpecies]\r\n elif naoSelectMode == 'energy_min' or naoSelectMode == 'energy_max':\r\n threshold = additionalKeywords[\"numerical_orbitals\"][\"cutoff_list_energy\"][idxSpecies]\r\n elif naoSelectMode == 'energy_min_radius_min' or naoSelectMode == 'energy_min_radius_max' or naoSelectMode == 'energy_max_radius_min' or naoSelectMode == 'energy_max_radius_max':\r\n threshold = [\r\n additionalKeywords[\"numerical_orbitals\"][\"cutoff_list_energy\"][idxSpecies], 
\r\n additionalKeywords[\"numerical_orbitals\"][\"cutoff_list_radius\"][idxSpecies]\r\n ]\r\n else:\r\n raise ValueError(\"Numerical orbitals: select_nao mode not recognized\")\r\n naoFileName = findNAOByCutoff(\r\n element = atomSpecies[idxSpecies],\r\n threshold = threshold,\r\n zetaSelection = additionalKeywords[\"numerical_orbitals\"][\"basis_type\"],\r\n mode = naoSelectMode\r\n )\r\n f.writelines(atomSpecies[idxSpecies] + ' ' + keywordsWrite(naoFileName) + '\\n')\r\n # end of numerical orbitals\r\n f.writelines('\\nLATTICE_CONSTANT\\n1.0\\n')\r\n f.writelines('\\nLATTICE_VECTORS\\n')\r\n for cell_vector in qeInputScript['CELL_PARAMETERS']['cell']:\r\n f.writelines(\r\n keywordsWrite(cell_vector[0]) + ' ' \r\n + keywordsWrite(cell_vector[1]) + ' ' \r\n + keywordsWrite(cell_vector[2]) + '\\n')\r\n f.writelines('\\nATOMIC_POSITIONS\\nCartesian\\n\\n')\r\n\r\n for ityp in range(qeInputScript['system']['ntyp']):\r\n f.writelines(qeInputScript['ATOMIC_SPECIES']['elements'][ityp] + '\\n')\r\n # ================ write starting_magnetization of present species =================\r\n if 'nspin' in qeInputScript['system']:\r\n if qeInputScript['system']['nspin'] == 1:\r\n f.writelines('0.0\\n')\r\n elif qeInputScript['system']['nspin'] == 2:\r\n try:\r\n f.writelines(keywordsWrite(\r\n qeInputScript['ATOMIC_SPECIES']['starting_magnetization'][ityp]\r\n ) + '\\n')\r\n except KeyError:\r\n raise Exception('*ERROR: nspin = 2 but starting_megnetization is not defined, this is not a complete qe input.')\r\n elif 'noncolin' in qeInputScript['system']:\r\n if qeInputScript['system']['noncolin']:\r\n f.writelines(keywordsWrite(1.0) + '\\n')\r\n else:\r\n print('|-Conversion warning: nspin will set to 1 as default. But possible to leave odd number of electrons requiring RKS error.')\r\n f.writelines('0.0\\n')\r\n # ================ write XYZ, constraint, mag and angle1 and 2 =================\r\n lineIndices = fromElementFindLinesInXYZ(\r\n qeInputScript['ATOMIC_SPECIES']['elements'][ityp], \r\n qeInputScript['ATOMIC_POSITIONS']['elements']\r\n )\r\n f.writelines(str(len(lineIndices)) + '\\n')\r\n for lineIndex in lineIndices:\r\n f.writelines(\r\n keywordsWrite(qeInputScript['ATOMIC_POSITIONS']['coordinates'][lineIndex][0])\r\n + ' ' + keywordsWrite(qeInputScript['ATOMIC_POSITIONS']['coordinates'][lineIndex][1])\r\n + ' ' + keywordsWrite(qeInputScript['ATOMIC_POSITIONS']['coordinates'][lineIndex][2])\r\n + ' ')\r\n f.writelines('m '\r\n + keywordsWrite(abs(qeInputScript['ATOMIC_POSITIONS']['constraints'][lineIndex][0]-1))\r\n + ' ' + keywordsWrite(abs(qeInputScript['ATOMIC_POSITIONS']['constraints'][lineIndex][1]-1))\r\n + ' ' + keywordsWrite(abs(qeInputScript['ATOMIC_POSITIONS']['constraints'][lineIndex][2]-1))\r\n + ' '\r\n )\r\n if 'nspin' in qeInputScript['system']:\r\n if qeInputScript['system']['nspin'] == 2:\r\n f.writelines(\r\n 'mag '+keywordsWrite(qeInputScript['ATOMIC_SPECIES']['starting_magnetization'][ityp])\r\n + ' '\r\n )\r\n if 'noncolin' in qeInputScript['system']:\r\n if qeInputScript['system']['noncolin']:\r\n f.writelines(\r\n 'mag '+keywordsWrite(qeInputScript['ATOMIC_SPECIES']['starting_magnetization'][ityp])\r\n + ' '\r\n )\r\n f.writelines(\r\n 'angle1 ' + keywordsWrite(qeInputScript['ATOMIC_SPECIES']['noncolinear_angle1and2'][ityp][0])\r\n +' angle2 ' + keywordsWrite(qeInputScript['ATOMIC_SPECIES']['noncolinear_angle1and2'][ityp][1])\r\n +'\\n'\r\n )\r\n else:\r\n f.writelines('\\n')\r\n f.writelines('\\n')\r\n# KPT file write from 
here========================================================================\r\n with open(ABACUSKpointsFile, 'w', encoding='utf-8') as f:\r\n\r\n f.writelines('K_POINTS\\n')\r\n f.writelines('0\\n')\r\n f.writelines('Gamma\\n')\r\n if qeInputScript['K_POINTS']['mode'] == 'mk':\r\n f.writelines(\r\n keywordsWrite(qeInputScript['K_POINTS']['grid'][0]) + ' '\r\n + keywordsWrite(qeInputScript['K_POINTS']['grid'][1]) + ' '\r\n + keywordsWrite(qeInputScript['K_POINTS']['grid'][2]) + ' '\r\n + keywordsWrite(qeInputScript['K_POINTS']['shift'][0]) + ' '\r\n + keywordsWrite(qeInputScript['K_POINTS']['shift'][1]) + ' '\r\n + keywordsWrite(qeInputScript['K_POINTS']['shift'][2]) + '\\n'\r\n )\r\n elif qeInputScript['K_POINTS']['mode'] == 'kpath':\r\n print('|-K_POINTS warning: In this case you should make sure you already have a KPT file to get converged charge density')\r\n with open('KLINES', 'w') as f2:\r\n f2.writelines('K_POINTS\\n')\r\n f2.writelines(str(len(qeInputScript['K_POINTS']['kpts'])) + '\\n')\r\n f2.writelines('Line\\n')\r\n for i, kpt in enumerate(qeInputScript['K_POINTS']['kpts']):\r\n f2.writelines(\r\n keywordsWrite(kpt[0]) + ' ' \r\n + keywordsWrite(kpt[1]) + ' ' \r\n + keywordsWrite(kpt[2]) + ' ' \r\n + keywordsWrite(qeInputScript['K_POINTS']['w_kpts'][i] )\r\n + '\\n')\r\n f2.writelines('\\n\\n')\r\n print('*- fp2a (Quantum ESPRESSO-specific version) finished.')\r\n\r\n# unit test\r\nif __name__ == '__main__':\r\n \r\n import fp2a_io\r\n _, overwriteKeywords, additionalKeywords = fp2a_io.readInputScript()\r\n\r\n generateABACUSFiles(\r\n mode = 'file',\r\n qeInputScriptJsonFile = \"test_noncolinear_qe7.1.json\",\r\n keywordConversionJsonFile = \"depuis_le_abacus_rotated-qac.json\",\r\n qeVersion_7_1 = False,\r\n overwriteKeywords = overwriteKeywords,\r\n additionalKeywords = additionalKeywords\r\n )","repo_name":"kirk0830/fp2a","sub_path":"fp2a_qeJson2abacus.py","file_name":"fp2a_qeJson2abacus.py","file_ext":"py","file_size_in_byte":26567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25449296030","text":"#A python moduláris felépítésű, a sys modul tartalmazza többek között a\n#függvényeket\nimport sys\n\n#A sys.argv egy lista amely parancssori argumentumokat ad vissza a python-nak.\n#A script neve mindig az első, az index 0-ik helyen.\nscript, encoding, error = sys.argv\n\n#A main nevű függvény definiálása, 3 argumentuma van.\ndef main(language_file, encoding, errors):\n#A main nevű függvény a language_file-t soronként beolvassa.\n line = language_file.readline()\n#Azt teszteli, hogy a line változónak van-e értéke, azaz az olvasott sorban\n#van-e karakter. Fogdd így fel: 'if line' = 'if line = true'\n if line:\n#A print_line nevű függvényt hívom meg (azért, hogy az adott sort kinyomtassa.)\n print_line(line, encoding, errors)\n#Amíg nem kapok vissza üres sort a file-ból addig hívom meg a main függvényt,\n#azaz így végiglépkedek soronként a language_file-on\n return main(language_file, encoding, errors)\n\n#A print_line nevű függvény definiálása\ndef print_line(line, encoding, errors):\n#A line string (azaz az éppen olvasott sor) végéről leszedi a sortörést és\n#beleteszi egy next_lang változóba a teljes sort.\n next_lang = line.strip()\n#raw_bytes = numberical bytes in hexadecimal. 
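# (English gloss of the Hungarian tutorial comments in this record, added for
#  readers: the script reads languages.txt line by line, strips the trailing
#  newline, encodes each line to raw bytes using the encoding given on the
#  command line, decodes those bytes back into a string, and prints both
#  sides separated by "<===>". The core roundtrip as a standalone snippet:
#      s = "szia"
#      raw = s.encode("utf-8")           # str -> bytes
#      assert raw.decode("utf-8") == s   # bytes -> str, lossless roundtrip
#  The comment continuing below explains exactly this encode step.)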
\n    raw_bytes = next_lang.encode(encoding, errors=errors)\n#Decode the raw bytes back into a string; in the end this must match the\n#next_lang string.\n    cooked_string = raw_bytes.decode(encoding, errors=errors)\n\n#Print the raw bytes and the string, separated by a <====> marker.\n    print(raw_bytes, \"<===>\", cooked_string)\n\n#The function definitions are now done; open the languages.txt file using the\n#(default) utf-8 encoding.\nlanguages = open(\"languages.txt\", encoding=\"utf-8\")\n\n#Run the main function. It will jump to wherever the main function was\n#defined.\nmain(languages, encoding, error)\n\n#The Hungarian version, which shows more clearly what the program does and why\n#(identifiers kept in Hungarian, as in the original)\nprint(\"\\n\" * 5)\n\nimport sys\nscript, enkódolás, hibák = sys.argv\ndef main (nyelvi_szöveg, enkódolás, hibák):\n    sor = nyelvi_szöveg.readline()\n    if sor:\n        sornyomtat(sor,enkódolás, hibák)\n    return main (nyelvi_szöveg, enkódolás, hibák)\ndef sornyomtat(sor, encoding, hibák):\n    enter_nélküli_sor = sor.strip()\n    nyers_byteok = enter_nélküli_sor.encode(encoding, errors=hibák)\n    vissza_byteok_stringbe = nyers_byteok.decode(encoding, errors=hibák)\n    print(nyers_byteok, \"<====>\", vissza_byteok_stringbe)\nnyelvek = open(\"languages.txt\")\nmain(nyelvek, enkódolás, hibák)\n","repo_name":"dragonka79/pys","sub_path":"ex23.py","file_name":"ex23.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"33727227560","text":"from sklearn.datasets import fetch_openml\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\nfrom sklearn.preprocessing import MinMaxScaler\r\ndata=fetch_openml('mnist_784')\r\n\r\nX=data.data\r\ny=data.target\r\ny=np.array([int(x) for x in y])\r\n\r\nX=pd.DataFrame(X)\r\ny=pd.DataFrame(y)\r\n\r\na=np.ones(shape=[70000,1])\r\n\r\na=pd.DataFrame(a*5)\r\ny=(y==a)\r\n\r\n\r\nX_train, X_test, y_train, y_test = X[30000:37000], X[37000:40000], y[30000:37000], y[37000:40000]\r\nX_train=pd.DataFrame(X_train)\r\nX_test=pd.DataFrame(X_test)\r\ny_train=pd.DataFrame(y_train)\r\ny_test=pd.DataFrame(y_test)\r\n\r\nsc=MinMaxScaler()\r\nsc.fit(X_train)\r\nX_train=sc.transform(X_train)\r\nX_test=sc.transform(X_test)\r\n\r\n\r\nknn = KNeighborsClassifier(n_neighbors=4)\r\nknn.fit(X_train, y_train)\r\n\r\ny_pred = knn.predict(X_test)\r\n\r\nprint('Test Accuracy=', end=\" \")\r\nprint(accuracy_score(y_test, y_pred))\r\nprint(confusion_matrix(y_test, y_pred))\r\nprint()\r\n\r\n\r\n\r\n#Test Accuracy= 0.9833333333333333\r\n#Confusion Matrix\r\n#[[2717    8]\r\n# [  42  233]]\r\n","repo_name":"Harshith-H/digitRecogniser-KNN-LR","sub_path":"code/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"13800874930","text":"import os\n\nfrom starlette.responses import JSONResponse\nfrom eduone_mail.app.core.tasks import send_email\nfrom fastapi import APIRouter, File, UploadFile, HTTPException\nfrom pydantic import EmailStr\nfrom eduone_mail.app.core.aws import download_from_cdn, upload_file_to_cdn, delete_file\nimport tempfile\nimport eduone_mail.app.settings.config as 
Envs\nimport shutil\nfrom eduone_mail.app.schemas import EmailSend\nfrom eduone_mail.app.exceptions import TemplateNotFound\n\n\nrouter = APIRouter()\n\n\n@router.post(\"/email/send\")\nasync def send_email_format(email_sch: EmailSend) -> JSONResponse:\n template_body = email_sch.template_body\n template_name = email_sch.template_name\n template_subject = email_sch.template_subject\n try:\n download_from_cdn(template_name)\n except TemplateNotFound:\n return JSONResponse(status_code=404, content={\"message\": \"Template not found\"})\n await send_email.apply_async(\n args=(email_sch.email, template_body, template_name, template_subject),\n queue='queue2',\n )\n return JSONResponse(status_code=200, content={\"message\": \"email has been sent\"})\n\n\n@router.post(\"/email/register/send\")\nasync def send_email_register(email: EmailStr) -> JSONResponse:\n template_body = {\"title\": 'g', 'name': 'ge'}\n template_name = \"test.html\"\n template_subject = \"Subject\"\n await send_email.apply_async(\n args=(email, template_body, template_name, template_subject), queue='queue2'\n )\n return JSONResponse(status_code=200, content={\"message\": \"email has been sent\"})\n\n\n@router.post(\"/email/upload/template\", tags=[\"Upload template to server\"])\nasync def upload_file(file: UploadFile = File(...)):\n try:\n f = tempfile.NamedTemporaryFile(dir=Envs.TEMPLATE_FOLDER, delete=False)\n tmp_path = f.name\n print(file.filename)\n with open(tmp_path, \"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n upload_file_to_cdn(tmp_path, file.filename)\n\n except Exception:\n raise HTTPException(status_code=500, detail=\"Some error in uploading\")\n finally:\n try:\n os.unlink(tmp_path)\n except OSError:\n pass\n","repo_name":"PavelKhanOff/Email-service","sub_path":"app/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73276979785","text":"# 13565번 침투\r\nimport sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\nM, N = map(int, input().split())\r\ngrid = [list(input().rstrip()) for _ in range(M)]\r\n\r\ndxys = ((0,1), (1,0), (-1,0), (0,-1))\r\nvisited = [[False]*N for _ in range(M)]\r\n\r\ndef bfs(s_x, s_y):\r\n queue = deque([])\r\n queue.append([s_x, s_y])\r\n visited[s_y][s_x] = True\r\n while queue:\r\n c_x, c_y = queue.popleft()\r\n for dx, dy in dxys:\r\n n_x = c_x + dx\r\n n_y = c_y + dy\r\n if 0 <= n_x < N and 0 <= n_y < M and visited[n_y][n_x] == False and grid[n_y][n_x] == \"0\":\r\n if n_y == M-1:\r\n return True\r\n visited[n_y][n_x] = True\r\n queue.append([n_x, n_y])\r\n\r\nflag = False\r\nfor i in range(N):\r\n if grid[0][i] == \"0\":\r\n flag = bfs(i, 0)\r\n if flag == True:\r\n break\r\n\r\nif flag:\r\n print(\"YES\")\r\nelse:\r\n print(\"NO\")\r\n","repo_name":"glaxyt/bojSolv","sub_path":"13565.py","file_name":"13565.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12703298614","text":"from helpers import *\n\n# Data structure:\n# year, category, prize, motivation, prize_share, laureate_type, full_name, birth_date,\n# birth_city, birth_country, birth_country_current, sex, organization_name,\n# organization_city, organization_country, ISO\n\n\ndef day_078():\n df_data = pd.read_csv(\"./nobel_prize_data.csv\")\n # print(df_data)\n # print(df_data.info())\n # print(df_data.head())\n # print(df_data.isna().values.any()) # 
check if na values present (PRESENT)\n # print(df_data.duplicated().values.any()) # check for duplicates (NONE)\n # print(df_data.isna().sum()) # count nas (A LOT)\n\n ################# fixing types #################\n # date\n df_data.birth_date = pd.to_datetime(df_data.birth_date)\n\n # change prize share to percentage type new column\n separated_values = df_data.prize_share.str.split(\"/\", expand=True)\n numerator = pd.to_numeric(separated_values[0])\n denomenator = pd.to_numeric(separated_values[1])\n df_data[\"share_pct\"] = numerator / denomenator\n\n ################# Charts #################\n\n ################# 1a (Male to Female Ratio) #################\n # biology = df_data.sex.value_counts()\n # fig = px.pie(\n # labels=biology.index,\n # values=biology.values,\n # title=\"Percentage of Male vs. Female Winners\",\n # names=biology.index,\n # hole=0.4,\n # )\n\n # fig.update_traces(textposition=\"inside\", textfont_size=15, textinfo=\"percent\")\n # fig.show() # women make up only 6.21%\n\n ################# 1b (Print first three female winners, and multiple winners) #################\n # ftf = df_data[df_data.sex == \"Female\"].sort_values(\"year\", ascending=True)[\n # :3\n # ] # first three females winners\n # print(ftf.full_name)\n\n # is_winner = df_data.duplicated(subset=[\"full_name\"], keep=False)\n # multiple_winners = df_data[is_winner]\n # print(\n # f\"There are {multiple_winners.full_name.nunique()}\"\n # \" winners who were awarded the prize more than once.\"\n # )\n # col_subset = [\"year\", \"category\", \"laureate_type\", \"full_name\"]\n # print(multiple_winners[col_subset])\n\n ################# 2 (Prizes per category) #################\n # prizes_per_category = df_data.category.value_counts()\n # v_bar = px.bar(\n # x=prizes_per_category.index,\n # y=prizes_per_category.values,\n # color=prizes_per_category.values,\n # color_continuous_scale=\"Aggrnyl\",\n # title=\"Number of Prizes Awarded per Category\",\n # )\n\n # v_bar.update_layout(\n # xaxis_title=\"Nobel Prize Category\",\n # coloraxis_showscale=False,\n # yaxis_title=\"Number of Prizes\",\n # )\n # v_bar.show()\n # economics = df_data[df_data.category == \"Economics\"].sort_values(\"year\")[:3]\n\n ################# 3 (Sex by Category) #################\n\n # cat_men_women = df_data.groupby([\"category\", \"sex\"], as_index=False).agg(\n # {\"prize\": pd.Series.count}\n # )\n # cat_men_women.sort_values(\"prize\", ascending=False, inplace=True)\n # v_bar_split = px.bar(\n # x=cat_men_women.category,\n # y=cat_men_women.prize,\n # color=cat_men_women.sex,\n # title=\"Number of Prizes Awarded per Category split by Men and Women\",\n # )\n\n # v_bar_split.update_layout(\n # xaxis_title=\"Nobel Prize Category\", yaxis_title=\"Number of Prizes\"\n # )\n # v_bar_split.show()\n\n ################# 4a (Frequency of Prizes) #################\n # prize_per_year = df_data.groupby(by=\"year\").count().prize\n # moving_average = prize_per_year.rolling(window=5).mean()\n # plt.figure(figsize=(16, 8), dpi=200)\n # plt.title(\"Number of Nobel Prizes Awarded per Year\", fontsize=18)\n # plt.yticks(fontsize=14)\n # plt.xticks(ticks=np.arange(1900, 2021, step=5), fontsize=14, rotation=45)\n\n # ax = plt.gca() # get current axis\n # ax.set_xlim(1900, 2020)\n\n # ax.scatter(\n # x=prize_per_year.index,\n # y=prize_per_year.values,\n # c=\"dodgerblue\",\n # alpha=0.7,\n # s=100,\n # )\n\n # ax.plot(\n # prize_per_year.index,\n # moving_average.values,\n # c=\"crimson\",\n # linewidth=3,\n # )\n\n # plt.show()\n\n 
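# Added sketch (editor's illustration, not from the original notebook): the\n    # 5-year moving average used in 4a reduces to two pandas calls, assuming\n    # only that df_data has a 'year' column:\n    #   counts = df_data.groupby('year').size()      # prizes per year\n    #   smooth = counts.rolling(window=5).mean()     # 5-year moving average\n\n    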
################# 4b (Percentage Share of Prize) #################\n\n # yearly_avg_share = df_data.groupby(by=\"year\").agg({\"share_pct\": pd.Series.mean})\n # share_moving_average = yearly_avg_share.rolling(window=5).mean()\n # plt.figure(figsize=(16, 8), dpi=200)\n # plt.title(\"Number of Nobel Prizes Awarded per Year\", fontsize=18)\n # plt.yticks(fontsize=14)\n # plt.xticks(ticks=np.arange(1900, 2021, step=5), fontsize=14, rotation=45)\n\n # ax1 = plt.gca()\n # ax2 = ax1.twinx()\n\n # ax1.set_xlim(1900, 2020)\n\n # # Can invert axis\n # ax2.invert_yaxis()\n\n # ax1.scatter(\n # x=prize_per_year.index, #uncomment lines 93 & 94\n # y=prize_per_year.values,\n # c=\"dodgerblue\",\n # alpha=0.7,\n # s=100,\n # )\n\n # ax1.plot(\n # prize_per_year.index,\n # moving_average.values,\n # c=\"crimson\",\n # linewidth=3,\n # )\n\n # ax2.plot(\n # prize_per_year.index,\n # share_moving_average.values,\n # c=\"grey\",\n # linewidth=3,\n # )\n\n # plt.show()\n\n ################# 5a (Top Awarded Countries) #################\n\n # top_countries = df_data.groupby([\"birth_country_current\"], as_index=False).agg(\n # {\"prize\": pd.Series.count}\n # )\n\n # top_countries.sort_values(by=\"prize\", inplace=True)\n # top20_countries = top_countries[-20:]\n\n # h_bar = px.bar(\n # x=top20_countries.prize,\n # y=top20_countries.birth_country_current,\n # orientation=\"h\",\n # color=top20_countries.prize,\n # color_continuous_scale=\"Viridis\",\n # title=\"Top 20 Countries by Number of Prizes\",\n # )\n\n # h_bar.update_layout(\n # xaxis_title=\"Number of Prizes\", yaxis_title=\"Country\", coloraxis_showscale=False\n # )\n # h_bar.show()\n\n ################# 5b (Top Awarded Countries MAP) #################\n # df_countries = df_data.groupby(\n # [\"birth_country_current\", \"ISO\"], as_index=False\n # ).agg({\"prize\": pd.Series.count})\n # df_countries.sort_values(\"prize\", ascending=False)\n\n # world_map = px.choropleth(\n # df_countries,\n # locations=\"ISO\",\n # color=\"prize\",\n # hover_name=\"birth_country_current\",\n # color_continuous_scale=px.colors.sequential.matter,\n # )\n\n # world_map.update_layout(\n # coloraxis_showscale=True,\n # )\n\n # world_map.show()\n\n ################# 6 (Countries by # Prizes & Category) #################\n\n # cat_country = df_data.groupby(\n # [\"birth_country_current\", \"category\"], as_index=False\n # ).agg({\"prize\": pd.Series.count})\n # cat_country.sort_values(by=\"prize\", ascending=False, inplace=True)\n # print(cat_country)\n\n # # uncomment lines 163-168\n # # change column names\n # merged_df = pd.merge(cat_country, top20_countries, on=\"birth_country_current\")\n\n # merged_df.columns = [\n # \"birth_country_current\",\n # \"category\",\n # \"cat_prize\",\n # \"total_prize\",\n # ]\n # merged_df.sort_values(by=\"total_prize\", inplace=True)\n\n # cat_cntry_bar = px.bar(\n # x=merged_df.cat_prize,\n # y=merged_df.birth_country_current,\n # color=merged_df.category,\n # orientation=\"h\",\n # title=\"Top 20 Countries by Number of Prizes and Category\",\n # )\n\n # cat_cntry_bar.update_layout(xaxis_title=\"Number of Prizes\", yaxis_title=\"Country\")\n # cat_cntry_bar.show()\n\n ################# 7 (Countries Prizers over Time) #################\n\n # prize_by_year = df_data.groupby(\n # by=[\"birth_country_current\", \"year\"], as_index=False\n # ).count()\n # prize_by_year = prize_by_year.sort_values(\"year\")[\n # [\"year\", \"birth_country_current\", \"prize\"]\n # ]\n\n # cumulative_prizes = (\n # 
prize_by_year.groupby(by=[\"birth_country_current\", \"year\"])\n # .sum()\n # .groupby(level=[0])\n # .cumsum()\n # )\n # cumulative_prizes.reset_index(inplace=True)\n\n # l_chart = px.line(\n # cumulative_prizes,\n # x=\"year\",\n # y=\"prize\",\n # color=\"birth_country_current\",\n # hover_name=\"birth_country_current\",\n # )\n\n # l_chart.update_layout(xaxis_title=\"Year\", yaxis_title=\"Number of Prizes\")\n\n # l_chart.show()\n\n ################# 8 (Top Organisations) #################\n # top20_orgs = df_data.organization_name.value_counts()[:20]\n # top20_orgs.sort_values(ascending=True, inplace=True)\n\n # org_bar = px.bar(\n # x=top20_orgs.values,\n # y=top20_orgs.index,\n # orientation=\"h\",\n # color=top20_orgs.values,\n # color_continuous_scale=px.colors.sequential.haline,\n # title=\"Top 20 Research Institutions by Number of Prizes\",\n # )\n\n # org_bar.update_layout(\n # xaxis_title=\"Number of Prizes\",\n # yaxis_title=\"Institution\",\n # coloraxis_showscale=False,\n # )\n # org_bar.show()\n\n ################# 9 (Top Research Cities) #################\n # top20_org_cities = df_data.organization_city.value_counts()[:20]\n # top20_org_cities.sort_values(ascending=True, inplace=True)\n # city_bar2 = px.bar(\n # x=top20_org_cities.values,\n # y=top20_org_cities.index,\n # orientation=\"h\",\n # color=top20_org_cities.values,\n # color_continuous_scale=px.colors.sequential.Plasma,\n # title=\"Which Cities Do the Most Research?\",\n # )\n\n # city_bar2.update_layout(\n # xaxis_title=\"Number of Prizes\", yaxis_title=\"City\", coloraxis_showscale=False\n # )\n # city_bar2.show()\n\n ################# 10 (Top Birth Cities) #################\n\n # top20_cities = df_data.birth_city.value_counts()[:20]\n # top20_cities.sort_values(ascending=True, inplace=True)\n # city_bar = px.bar(\n # x=top20_cities.values,\n # y=top20_cities.index,\n # orientation=\"h\",\n # color=top20_cities.values,\n # color_continuous_scale=px.colors.sequential.Plasma,\n # title=\"Where were the Nobel Laureates Born?\",\n # )\n\n # city_bar.update_layout(\n # xaxis_title=\"Number of Prizes\",\n # yaxis_title=\"City of Birth\",\n # coloraxis_showscale=False,\n # )\n # city_bar.show()\n\n ################# SUNBURST CHART #################\n\n country_city_org = df_data.groupby(\n by=[\"organization_country\", \"organization_city\", \"organization_name\"],\n as_index=False,\n ).agg({\"prize\": pd.Series.count})\n\n country_city_org = country_city_org.sort_values(\"prize\", ascending=False)\n\n burst = px.sunburst(\n country_city_org,\n path=[\"organization_country\", \"organization_city\", \"organization_name\"],\n values=\"prize\",\n title=\"Where do Discoveries Take Place?\",\n )\n\n burst.update_layout(\n xaxis_title=\"Number of Prizes\", yaxis_title=\"City\", coloraxis_showscale=False\n )\n\n burst.show()\n\n ################# 11 (Winning Age - Various Charts) #################\n\n # birth_years = df_data.birth_date.dt.year\n # df_data[\"winning_age\"] = df_data.year - birth_years\n\n # plt.figure(figsize=(8, 4), dpi=200)\n # sbr.histplot(data=df_data, x=df_data.winning_age, bins=30)\n # plt.xlabel(\"Age\")\n # plt.title(\"Distribution of Age on Receipt of Prize\")\n # plt.show()\n\n # plt.figure(figsize=(8, 4), dpi=200)\n # with sbr.axes_style(\"whitegrid\"):\n # sbr.regplot(\n # data=df_data,\n # x=\"year\",\n # y=\"winning_age\",\n # lowess=True,\n # scatter_kws={\"alpha\": 0.4},\n # line_kws={\"color\": \"black\"},\n # )\n\n # plt.show()\n\n # plt.figure(figsize=(8, 4), dpi=200)\n # with 
sbr.axes_style(\"whitegrid\"):\n # sbr.boxplot(data=df_data, x=\"category\", y=\"winning_age\")\n\n # plt.show()\n\n # with sbr.axes_style(\"whitegrid\"):\n # sbr.lmplot(\n # data=df_data,\n # x=\"year\",\n # y=\"winning_age\",\n # row=\"category\",\n # lowess=True,\n # aspect=2,\n # scatter_kws={\"alpha\": 0.6},\n # line_kws={\"color\": \"black\"},\n # )\n # plt.show()\n\n # with sbr.axes_style(\"whitegrid\"):\n # sbr.lmplot(\n # data=df_data,\n # x=\"year\",\n # y=\"winning_age\",\n # hue=\"category\",\n # lowess=True,\n # aspect=2,\n # scatter_kws={\"alpha\": 0.5},\n # line_kws={\"linewidth\": 5},\n # )\n\n # plt.show()\n\n\nday_078()\n","repo_name":"idabblewith/SeabornPandasVisualisation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32416950076","text":"#!/usr/bin/env python3\n\n'''\nGiven a 2D array and a number of generations, compute n timesteps of Conway's\nGame of Life.\n\nThe rules of the game are:\n Any live cell with fewer than two live neighbours dies, as if caused by underpopulation.\n Any live cell with more than three live neighbours dies, as if by overcrowding.\n Any live cell with two or three live neighbours lives on to the next generation.\n Any dead cell with exactly three live neighbours becomes a live cell.\n\nEach cell's neighborhood is the 8 cells immediately around it (i.e. Moore\nNeighborhood). The universe is infinite in both the x and y dimensions and all\ncells are initially dead - except for those specified in the arguments. The\nreturn value should be a 2d array cropped around all of the living cells. (If\nthere are no living cells, then return [[]].)\n'''\n\n\nimport numpy as np\n\ndef evolve(X):\n ext = np.zeros((X.shape[0]+4,X.shape[1]+4),dtype=int)\n ext[2:-2,2:-2] = X\n neigh = np.zeros((X.shape[0]+2,X.shape[1]+2),dtype=int)\n neigh = ext[0:-2,0:-2] + ext[0:-2,1:-1] + ext[0:-2,2:] + \\\n ext[1:-1,0:-2] + ext[1:-1,2:] + \\\n ext[2: ,0:-2] + ext[2:,1:-1] + ext[2: ,2:]\n new = np.logical_or(neigh==3,np.logical_and(ext[1:-1,1:-1]==1,neigh==2)).astype(int)\n r_max, r_min = 0, new.shape[0]\n c_max, c_min = 0, new.shape[1]\n for r in range(new.shape[0]):\n for c in range(new.shape[1]):\n if new[r,c] != 0:\n r_max, r_min = (r_max,r)[r>r_max], (r_min,r)[rc_max], (c_min,c)[c 0:\n return SUBLIST\n if len(list1) > len(list2):\n if sum(list1[i:i+len(list2)] == list2\n for i in range(len(list1)-len(list2)+1)) > 0:\n return SUPERLIST\n return UNEQUAL\n","repo_name":"hnlee/exercism-python","sub_path":"sublist/sublist.py","file_name":"sublist.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43225515342","text":"import time\nimport logging\nfrom collections import defaultdict\n\n\nclass Uzbl(object):\n\n def __init__(self, parent, proto, print_events=False):\n proto.target = self\n self.print_events = print_events\n self.parent = parent\n self.proto = proto\n self.time = time.time()\n self.pid = None\n self.name = None\n\n # Flag if the instance has raised the INSTANCE_START event.\n self.instance_start = False\n\n # Use name \"unknown\" until name is discovered.\n self.logger = logging.getLogger('uzbl-instance[]')\n\n # Plugin instances\n self._plugin_instances = []\n self.plugins = {}\n\n # Track plugin event handlers\n self.handlers = defaultdict(list)\n self.request_handlers = defaultdict(list)\n\n # Internal 
vars\n        self._depth = 0\n        self._buffer = ''\n\n    def __repr__(self):\n        return '<uzbl(%s)>' % ', '.join([\n            'pid=%s' % (self.pid if self.pid else \"Unknown\"),\n            'name=%s' % ('%r' % self.name if self.name else \"Unknown\"),\n            'uptime=%f' % (time.time() - self.time),\n            '%d handlers' % sum([len(l) for l in list(self.handlers.values())]),\n            '%d request handlers' % sum([len(l) for l in list(self.request_handlers.values())])])\n\n    def init_plugins(self):\n        '''Creates instances of per-instance plugins'''\n\n        for plugin in self.parent.plugind.per_instance_plugins:\n            pinst = plugin(self)\n            self._plugin_instances.append(pinst)\n            self.plugins[plugin] = pinst\n\n    def send(self, msg):\n        '''Send a command to the uzbl instance via the child socket\n        instance.'''\n\n        msg = msg.strip()\n\n        if self.print_events:\n            self.logger.debug(('%s<-- %s' % (' ' * self._depth, msg)))\n\n        self.proto.push((msg+'\\n').encode('utf-8'))\n\n    def reply(self, cookie, response):\n        '''Send the response to a request identified by its cookie.'''\n\n        self.send('REPLY-%s %s' % (cookie, response))\n\n    def request(self, cookie, request, *args, **kargs):\n        '''Dispatch a request to its handlers and reply with the final response.'''\n\n        request = request.upper()\n\n        if self.print_events:\n            elems = [request]\n            if args:\n                elems.append(str(args))\n            if kargs:\n                elems.append(str(kargs))\n            self.logger.debug(('%s %s %s' % (' ' * self._depth, cookie, ' '.join(elems))))\n\n        final_response = None\n\n        if request in self.request_handlers:\n            for (prio, handler) in self.request_handlers[request]:\n                self._depth += 1\n                try:\n                    (response, args, kargs) = handler(final_response, *args, **kargs)\n                    if response is not None:\n                        final_response = response\n                except BaseException:\n                    self.logger.error('error in request handler for \\'%s\\'', request, exc_info=True)\n                self._depth -= 1\n\n        if final_response is None:\n            final_response = ''\n\n        self.reply(cookie, final_response)\n\n    def event(self, event, *args, **kargs):\n        '''Raise an event.'''\n\n        event = event.upper()\n\n        if self.print_events:\n            elems = [event]\n            if args:\n                elems.append(str(args))\n            if kargs:\n                elems.append(str(kargs))\n            self.logger.debug(('%s--> %s' % (' ' * self._depth, ' '.join(elems))))\n\n        if event == \"INSTANCE_START\" and args:\n            assert not self.instance_start, 'instance already started'\n\n            self.pid = int(args[0])\n            self.logger.info('found instance pid %r', self.pid)\n\n            self.init_plugins()\n\n        elif event == \"INSTANCE_EXIT\":\n            self.logger.info('uzbl instance exit')\n            self.close()\n\n        if event not in self.handlers:\n            return\n\n        for handler in self.handlers[event]:\n            self._depth += 1\n            try:\n                handler(*args, **kargs)\n\n            except BaseException:\n                self.logger.error('error in handler for \\'%s\\'', event, exc_info=True)\n\n            self._depth -= 1\n\n    def close_connection(self, child_socket):\n        '''Close child socket and delete the uzbl instance created for that\n        child socket connection.'''\n        self.proto.close()\n\n    def close(self):\n        '''Close the client socket and call the plugin cleanup hooks.'''\n\n        self.logger.debug('called close method')\n\n        # Remove self from parent uzbls dict.\n        self.logger.debug('removing self from uzbls list')\n        self.parent.remove_instance(self.proto.socket)\n\n        for plugin in self._plugin_instances:\n            plugin.cleanup()\n        del self.plugins # to avoid cyclic links\n        del self._plugin_instances\n\n        self.logger.info('removed %r', self)\n\n    def connect(self, name, handler):\n        \"\"\"Attach event handler\n\n        No extra arguments added. 
Use bound methods and partials to have\n extra arguments.\n \"\"\"\n\n def fst(a):\n return a[0]\n\n self.request_handlers[name].append((prio, handler))\n self.request_handlers[name].sort(key=fst)\n","repo_name":"uzbl/uzbl","sub_path":"uzbl/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7137,"program_lang":"python","lang":"en","doc_type":"code","stars":996,"dataset":"github-code","pt":"81"} +{"seq_id":"30483832686","text":"### Script for training transformation of prod data - concats all data from dir for prod name, num and price ###\nimport os\nimport pandas as pd\nimport numpy as np\nfrom os import listdir\nfrom sklearn.model_selection import train_test_split\n\nclasses = ['PROD_NAME', 'PROD_NUM', 'UNIT_PRICE', 'PROD_BARCODE_NUMBER', 'OTHER']\n\ndf = pd.DataFrame(columns=['PRODUCTS', '','','',''])\nclasses_series = pd.Series(classes, index=df.columns)\ndf = df.append(classes_series, ignore_index=True)\ndf.to_csv('test1.csv', index=False)\n\nPATH = r'C:\\Users\\mail\\PycharmProjects\\MLDM\\Data\\Raw Data'\nlist_files = listdir(PATH)\n\ndata_list = []\ni = 0\nfor j in range(len(list_files)):\n print(i + len(list_files), 'of', (len(list_files)), \"files remaining.\")\n dataset_filename = os.listdir(PATH)[j]\n print(dataset_filename)\n dataset_path = os.path.join(\"../../..\", PATH, dataset_filename)\n data = pd.read_csv(dataset_path, error_bad_lines=False, engine='c', encoding=\"UTF-8\", low_memory=False,\n skiprows=1)\n data = data[['PROD_NAME', 'PROD_NUM', 'UNIT_PRICE']]\n data.drop_duplicates(subset=['PROD_NAME'], inplace=True)\n data.drop_duplicates(subset=['PROD_NUM'], inplace=True)\n data.drop_duplicates(subset=['UNIT_PRICE'], inplace=True)\n data_list.append(data)\n i -= 1\n\ndfc = pd.concat(data_list)\nprint(df.shape)\ndfc.to_csv('test.csv', index=False)\n","repo_name":"JonasRosenzweig/MLDM","sub_path":"Miscellaneous Code/Data Transformation/final_transform.py","file_name":"final_transform.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37002827200","text":"from __future__ import annotations\n\nimport os\nfrom subprocess import Popen, PIPE, STDOUT\nimport sys\nfrom typing import Optional\n\n\nLETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nWINDOWS_COMPILERS = ['cl']\n\n\ndef compile_dll(path_to_file: str, output: Optional[str] = None) -> Optional[str]:\n \"\"\"\n\n :param path_to_file:\n :param output:\n :return:\n \"\"\"\n if os.path.isfile(path_to_file):\n vcvars = get_vcvars()\n if vcvars is not None:\n cl = f'cl.exe /LD {path_to_file}'\n if output is not None:\n cl += f' /F {output}'\n commands = ' && '.join((vcvars, cl, 'exit 0'))\n commands = f'({commands}) || exit 1\\r\\n'\n out = execute_commands(commands, encoding='latin1')\n if out is not None:\n if output is not None:\n dll_path = output\n else:\n dll_path = path_to_file.rsplit('.', 1)\n dll_path[-1] = 'dll'\n return '.'.join(dll_path)\n else:\n return None\n else:\n return None\n else:\n raise FileNotFoundError(f\"File {path_to_file} does not exist\")\n\n\ndef execute_commands(commands: str, encoding: Optional[str] = None, errors: str = 'strict') -> str:\n \"\"\"\n\n :param commands:\n :param encoding:\n :param errors:\n :return:\n \"\"\"\n text_mode = (encoding is None)\n with Popen('cmd.exe', stdin=PIPE, stdout=PIPE, stderr=STDOUT, universal_newlines=text_mode) as process:\n if not text_mode:\n commands = commands.encode(encoding, errors)\n out, _ = process.communicate(commands)\n # Somehow err (2. 
output of communicate()) may be non-empty even though everything worked, and it\n    # does not really store errors either\n    # NOTE: Workaround by using 'exit 0' and 'exit 1' in 'cmd'\n    if process.returncode != 0:\n        if not text_mode:\n            out = out.decode(encoding, errors)\n        raise RuntimeError(f\"Could not execute commands\\n\"\n                           f\"{out}\\n\"\n                           f\"Return value: {process.returncode}\")\n    return out if text_mode else out.decode(encoding, errors)\n\n\ndef find_files(name: str) -> (list[str], list[str]):\n    \"\"\"\n    Search all hard drives for files with the given name.\n\n    :param name: file name to search for\n    :return: list of matching paths and list of their creation times\n    \"\"\"\n    loc = []\n    date = []\n    drives = get_hard_drives()\n    for drive in drives:\n        for root, dirs, files in os.walk(drive):\n            for file in files:\n                if file == name:\n                    path_to_file = root + '\\\\' + file\n                    loc.append(path_to_file)\n                    date.append(os.path.getctime(path_to_file))\n    if len(loc) != len(date):\n        raise ValueError(\"Lists 'loc' and 'date' must be of the same length\")\n    return loc, date\n\n\ndef get_hard_drives() -> list[str]:\n    \"\"\"\n    List the root paths of all available drive letters.\n\n    :return: list of existing drive root paths\n    \"\"\"\n    return [f'{drive}:\\\\' for drive in LETTERS if os.path.exists(f'{drive}:\\\\')]\n\n\ndef get_vcvars() -> str:\n    \"\"\"\n    Locate the newest vcvarsall.bat and build the command that calls it.\n\n    :return: path to vcvarsall.bat plus the matching architecture argument\n    \"\"\"\n    files, dates = find_files('vcvarsall.bat')\n    arch = '64bit' if sys.maxsize > 2 ** 32 else '32bit'\n    if files:\n        index_max = max(range(len(dates)), key=dates.__getitem__)\n        chosen = files[index_max]\n        if ' ' in chosen:\n            if arch == '64bit':\n                return f'\"{chosen}\" x64'\n            else:\n                return f'\"{chosen}\" x32'\n        else:\n            if arch == '64bit':\n                return f'{chosen} x64'\n            else:\n                return f'{chosen} x32'\n    else:\n        raise RuntimeError(\"Compiler not found\")\n","repo_name":"hilo-mpc/hilo-mpc","sub_path":"hilo_mpc/util/windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"81"}{"seq_id":"5089985205","text":"\"\"\"Setup script for intelino trainlib.\"\"\"\n\nfrom setuptools import setup, find_namespace_packages\n\n\nwith open(\"README.md\", encoding=\"utf-8\") as f:\n    text = f.read()\n    # update relative image path to url for pypi\n    img_ref = \"[main-img]: ./\"\n    img_url = \"[main-img]: https://raw.githubusercontent.com/intelino-code/intelino-trainlib-py/master/\"\n    long_description = text.replace(img_ref, img_url)\n\n\nREQUIREMENTS = [\n    \"intelino-trainlib-async\",\n]\n\nsetup(\n    name=\"intelino-trainlib\",\n    version=\"1.0.0\",\n    description=\"Python library (SDK) for interacting with the intelino smart train.\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    author=\"intelino\",\n    author_email=\"developer@intelino.com\",\n    license=\"Intelino Public License\",\n    url=\"https://intelino.com\",\n    project_urls={\n        \"Documentation\": \"https://intelino-trainlib-py.readthedocs.io/\",\n        \"Source Code\": \"https://github.com/intelino-code/intelino-trainlib-py\",\n        \"Examples\": \"https://github.com/intelino-code/intelino-trainlib-py-examples\",\n        \"Intelino Lab\": \"https://lab.intelino.com\",\n    },\n    packages=find_namespace_packages(include=[\"intelino*\"]),\n    python_requires=\">=3.7\",\n    classifiers=[\n        \"Intended Audience :: Developers\",\n        \"Intended Audience :: Education\",\n        \"License :: Other/Proprietary License\",\n        \"Operating System :: Microsoft :: Windows :: Windows 10\",\n        \"Operating System :: POSIX :: Linux\",\n        \"Operating System :: MacOS :: MacOS X\",\n        \"Programming Language :: Python\",\n        \"Programming Language :: Python :: 3\",\n        \"Programming Language :: Python :: 3.7\",\n        \"Programming Language :: Python :: 3.8\",\n        
\"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Typing :: Typed\",\n ],\n install_requires=REQUIREMENTS,\n)\n","repo_name":"intelino-code/intelino-trainlib-py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"4260155182","text":"from logging import getLogger\nfrom urllib.parse import urljoin, urlparse\n\nimport requests\nfrom mohawk import Sender\nfrom requests.auth import AuthBase\nfrom requests.exceptions import ConnectionError\n\nfrom datahub.core.exceptions import APIBadGatewayException\n\nlogger = getLogger(__name__)\n\n\nclass HawkAuth(AuthBase):\n \"\"\"Hawk authentication class.\"\"\"\n\n def __init__(self, api_id, api_key, signing_algorithm='sha256', verify_response=True):\n \"\"\"Initialises the authenticator with the signing parameters.\"\"\"\n self._api_id = api_id\n self._api_key = api_key\n self._signing_algorithm = signing_algorithm\n self._verify_response = verify_response\n\n def __call__(self, request):\n \"\"\"Signs a request, and attaches a response verifier.\"\"\"\n credentials = {\n 'id': self._api_id,\n 'key': self._api_key,\n 'algorithm': self._signing_algorithm,\n }\n\n sender = Sender(\n credentials,\n request.url,\n request.method,\n content=request.body or '',\n content_type=request.headers.get('Content-Type', ''),\n )\n\n request.headers['Authorization'] = sender.request_header\n if self._verify_response:\n request.register_hook('response', _make_response_verifier(sender))\n\n return request\n\n\nclass TokenAuth(AuthBase):\n \"\"\"\n Token authentication class.\n \"\"\"\n\n def __init__(self, token, token_keyword='Token'):\n \"\"\"\n Initialise the class with the token.\n \"\"\"\n self.token = token\n self.token_keyword = token_keyword\n\n def __call__(self, request):\n \"\"\"\n Inject the Authorization header in to the request.\n \"\"\"\n request.headers['Authorization'] = f'{self.token_keyword} {self.token}'\n return request\n\n\ndef _make_response_verifier(sender):\n def verify_response(response, *args, **kwargs):\n if response.ok:\n sender.accept_response(\n response.headers['Server-Authorization'],\n content=response.content,\n content_type=response.headers['Content-Type'],\n )\n\n return verify_response\n\n\nclass APIClient:\n \"\"\"Generic API client.\"\"\"\n\n # Prefer JSON to other content types\n DEFAULT_ACCEPT = 'application/json;q=0.9,*/*;q=0.8'\n\n def __init__(\n self,\n api_url,\n auth=None,\n accept=DEFAULT_ACCEPT,\n default_timeout=None,\n raise_for_status=True,\n request=None,\n ):\n \"\"\"Initialises the API client.\"\"\"\n self._api_url = api_url\n self._auth = auth\n self._accept = accept\n self._default_timeout = default_timeout\n self._raise_for_status = raise_for_status\n self._request = request\n\n def request(self, method, path, **kwargs):\n \"\"\"Makes an HTTP request.\"\"\"\n url = urljoin(self._api_url, path)\n\n logger.info(f'Sending request: {method.upper()} {url}')\n\n timeout = kwargs.pop('timeout', self._default_timeout)\n\n headers = kwargs.pop('headers', {})\n if self._accept:\n headers['Accept'] = self._accept\n if self._request:\n headers.update(get_zipkin_headers(self._request))\n\n try:\n response = requests.request(\n method,\n url,\n auth=self._auth,\n headers=headers,\n timeout=timeout,\n **kwargs,\n )\n except ConnectionError as e:\n logger.exception(e)\n raise APIBadGatewayException(\n f'Upstream service unavailable: 
{urlparse(url).netloc}',\n            ) from e\n        logger.info(f'Response received: {response.status_code} {method.upper()} {url}')\n        if self._raise_for_status:\n            response.raise_for_status()\n        return response\n\n\ndef get_zipkin_headers(request):\n    \"\"\"\n    Parses the request object and extracts Zipkin headers.\n\n    :param request: The request object\n    \"\"\"\n    if not request:\n        return {}\n\n    keys = [\n        'x-b3-traceid',\n        'x-b3-spanid',\n    ]\n    headers = {\n        key: request.headers.get(key)\n        for key in keys if key in request.headers\n    }\n    return headers\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/core/api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"}{"seq_id":"34949485462","text":"from collections import OrderedDict\n\n\nTABLE_NAME = {'MEAL': 0, 'CALORIES': 1, 'PROTEINS': 2, 'CARBS': 3, 'FATS': 4, 'AMOUNT': 5}\n\nclass MealParser(object):\n\tdef __init__(self, meals_file):\n\t\tself.file = meals_file\n\t\tself.meals_dict = OrderedDict()\n\n\tdef _split_meal(self, line, given_order, count):\n\t\tmeal = line.split(';')\n\t\tfor i in range(count):\n\t\t\tmeal[i] = meal[i].rstrip()\n\t\tmeal = self._in_order(meal, given_order, count)\n\t\treturn meal[0], meal[1:5]\n\n\tdef read_meals_from_file(self):\n\t\tfile = open(self.file, 'r')\n\t\tcolumns = file.readline().split(';')\n\t\tgiven_order = [-1] * 5\n\t\tcount = len(columns) - 1\n\t\tfor i in range(count):\n\t\t\tcolumns[i] = columns[i].rstrip().replace(' ', '').upper()\n\t\t\tgiven_order[i] = TABLE_NAME[columns[i]]\n\n\t\tfor line in file:\n\t\t\tname, values = self._split_meal(line, given_order, count)\n\t\t\tself.meals_dict[name] = values\n\t\tfile.close()\n\t\treturn self.meals_dict\n\n\t@staticmethod\n\tdef _in_order(meal, given_order, count):\n\t\tnew_values = [-1] * count\n\t\tfor index in range(count):\n\t\t\tfor i in [i for i, x in enumerate(given_order) if x == index]:\n\t\t\t\tnew_values[index] = meal[i]\n\t\tfor i in range(1, 5):\n\t\t\ttry:\n\t\t\t\tnew_values[i] = float((new_values[i].replace(' ', '')).replace(',', '.'))\n\t\t\texcept ValueError:\n\t\t\t\traise ValueError('Wrong value. Make sure you use an integer.')\n\t\treturn new_values\n","repo_name":"KonradKap/ALHE","sub_path":"meal_parser.py","file_name":"meal_parser.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"40076534087","text":"# SPDX-License-Identifier: MIT\nimport argparse\nimport re\nfrom typing import Dict, List, Optional\n\nfrom ..database import Database\nfrom ..diagservice import DiagService\nfrom ..exceptions import odxraise\nfrom ..odxtypes import ParameterValue\nfrom ..singleecujob import SingleEcuJob\nfrom . 
import _parser_utils\n\n# name of the tool\n_odxtools_tool_name_ = \"decode\"\n\n\ndef get_display_value(v: ParameterValue) -> str:\n if isinstance(v, bytes):\n return v.hex(\" \")\n elif isinstance(v, int):\n return f\"{v} (0x{v:x})\"\n else:\n return str(v)\n\n\ndef print_summary(\n odxdb: Database,\n ecu_variants: Optional[List[str]] = None,\n data: bytes = b'',\n decode: bool = False,\n) -> None:\n ecu_names = ecu_variants if ecu_variants else [ecu.short_name for ecu in odxdb.ecus]\n service_db: Dict[str, DiagService] = {}\n service_ecus: Dict[str, List[str]] = {}\n for ecu_name in ecu_names:\n ecu = odxdb.ecus[ecu_name]\n if not ecu:\n print(f\"The ecu variant '{ecu_name}' could not be found!\")\n continue\n if data:\n found_services = ecu._find_services_for_uds(data)\n for found_service in found_services:\n ecu_names = service_ecus.get(found_service.short_name, [])\n ecu_names.append(ecu_name)\n service_ecus[found_service.short_name] = ecu_names\n service_db[found_service.short_name] = found_service\n\n print(f\"Binary data: {data.hex(' ')}\")\n for service_name, ecu_names in service_ecus.items():\n service = service_db[service_name]\n if isinstance(service, DiagService):\n print(\n f\"Decoded by service '{service.short_name}' (decoding ECUs: {', '.join(ecu_names)})\"\n )\n elif isinstance(service, SingleEcuJob):\n print(\n f\"Decoded by single ecu job '{service.short_name}' (decoding ECUs: {', '.join(ecu_names)})\"\n )\n else:\n print(f\"Decoded by unknown diagnostic communication: '{service.short_name}' \"\n f\"(decoding ECUs: {', '.join(ecu_names)})\")\n\n if decode:\n if data is None:\n odxraise(\"Data is required for decoding\")\n\n decoded = service.decode_message(data)\n print(f\"Decoded data:\")\n for param_name, param_value in decoded.param_dict.items():\n print(f\" {param_name}={get_display_value(param_value)}\")\n\n\ndef add_subparser(subparsers: \"argparse._SubParsersAction\") -> None:\n parser = subparsers.add_parser(\n \"decode\",\n description=\"\\n\".join([\n \"Decode request by hex-data\",\n \"\",\n \"Examples:\",\n \" For displaying the service associated with the request 10 01 & decoding it:\",\n \" odxtools decode ./path/to/database.pdx -D -d '10 01'\",\n \" For displaying the service associated with the request 10 01, without decoding it:\",\n \" odxtools decode ./path/to/database.pdx -d '10 01'\",\n \" For more information use:\",\n \" odxtools decode -h\",\n ]),\n help=\"Find & print service by hex-data. 
Can also decode the hex-data into their named parameters.\",\n        formatter_class=argparse.RawTextHelpFormatter,\n    )\n    _parser_utils.add_pdx_argument(parser)\n\n    parser.add_argument(\n        \"-v\",\n        \"--variants\",\n        nargs=1,\n        metavar=\"VARIANT\",\n        required=False,\n        help=\"Specifies which ecu variants should be included.\",\n        default=\"all\",\n    )\n\n    parser.add_argument(\n        \"-d\",\n        \"--data\",\n        metavar=\"DATA\",\n        required=True,\n        help=\"Specify data of hex request\",\n    )\n\n    parser.add_argument(\n        \"-D\",\n        \"--decode\",\n        action=\"store_true\",\n        required=False,\n        help=\"Decode the given hex data\",\n    )\n\n\ndef hex_to_binary(data_str: str) -> bytes:\n    return bytes.fromhex(data_str)\n\n\ndef run(args: argparse.Namespace) -> None:\n    data = bytes.fromhex(re.sub('[^0-9a-fA-F]', '', args.data))\n    odxdb = _parser_utils.load_file(args)\n    variants = args.variants\n\n    print_summary(\n        odxdb,\n        ecu_variants=None if variants == \"all\" else variants,\n        data=data,\n        decode=args.decode,\n    )\n","repo_name":"mercedes-benz/odxtools","sub_path":"odxtools/cli/decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"81"}{"seq_id":"12527211216","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar  1 10:49:47 2019\r\n\r\n@author: 3602786\r\n\"\"\"\r\n\r\n# 1-blue 2-green 3-red\r\nfrom constraint import *\r\nimport numpy as np\r\nimport copy\r\nimport time\r\n\r\ndef solve_p1():\r\n    p = Problem();\r\n    p.addVariable(\"a\",[3,1])\r\n    p.addVariable(\"b\",[2,1])\r\n    p.addVariable(\"c\",[2,1])\r\n    p.addVariable(\"d\",[3])\r\n    \r\n    p.addConstraint(AllDifferentConstraint(),[\"a\",\"b\"])\r\n    p.addConstraint(AllDifferentConstraint(),[\"a\",\"c\"])\r\n    p.addConstraint(AllDifferentConstraint(),[\"a\",\"d\"])\r\n    p.addConstraint(AllDifferentConstraint(),[\"b\",\"c\"])\r\n    p.addConstraint(AllDifferentConstraint(),[\"c\",\"d\"])\r\n    \r\n    s = p.getSolutions()\r\n    return s\r\n\r\n#print(solve_p1())\r\n\r\ndef solve_p2():\r\n    p = Problem();\r\n    p.addVariable(\"a\",[3,2])\r\n    p.addVariable(\"b\",[2,1])\r\n    p.addVariable(\"c\",[2,1])\r\n    p.addVariable(\"d\",[2])\r\n    \r\n    p.addConstraint(AllDifferentConstraint(),[\"a\",\"b\"])\r\n    p.addConstraint(AllDifferentConstraint(),[\"a\",\"c\"])\r\n    p.addConstraint(AllDifferentConstraint(),[\"a\",\"d\"])\r\n    p.addConstraint(AllDifferentConstraint(),[\"b\",\"c\"])\r\n    p.addConstraint(AllDifferentConstraint(),[\"c\",\"d\"])\r\n    \r\n    s = p.getSolutions()\r\n    return s\r\n\r\n#print(solve_p2())\r\n\r\nclass Zone(object):\r\n    def __init__(self, voisins = [], utilite = {}):\r\n        self.voisins = voisins\r\n        self.utilite = utilite\r\n\r\nz1 = Zone(); z2 = Zone(); z3 = Zone(); z4 = Zone()\r\nz1.voisins = [z2,z3,z4]; z1.utilite = {'b':1, 'g':0.3, 'r':0.8}\r\nz2.voisins = [z1,z3]; z2.utilite = {'b':0.2, 'g':0.6, 'r':1}\r\nz3.voisins = [z1,z2,z4]; z3.utilite = {'b':1, 'g':0.7, 'r':0.4}\r\nz4.voisins = [z1,z3]; z4.utilite = {'b':0.5, 'g':1, 'r':0.9}\r\n \r\nzones = [z1,z2,z3,z4]\r\nmaxi = 0\r\ncurrent_value = 0\r\ncouleurs = []\r\n\r\ndef evaluation(zones,maxi):\r\n    for zone in zones:\r\n        maxi += max(zone.utilite.values())\r\n    return maxi\r\n\r\ndef bb(zones,maxi,current_value,couleurs):\r\n    #leaf reached\r\n    if zones == []:\r\n        return max(maxi,current_value),couleurs\r\n    \r\n    tmp_couleurs = copy.deepcopy(couleurs)\r\n\r\n    #sort the keys of the current node by their value\r\n    sorted_d = sorted((value, key) for (key,value) in zones[0].utilite.copy().items())\r\n    sorted_d = sorted(sorted_d,reverse=True)\r\n\r\n    for valeur,couleur in sorted_d:\r\n        \r\n        #copy the parameters that will be modified\r\n        zones2 = 
copy.deepcopy(zones)\r\n        noeud = zones2.pop(0)\r\n        current_value2 = current_value + noeud.utilite[couleur]\r\n        couleurs2 = copy.deepcopy(tmp_couleurs)\r\n        couleurs2.append(couleur)\r\n        \r\n        #compare with the upper bound of the branch, skip if the branch is useless \r\n        print(maxi,evaluation(zones2,current_value2))\r\n        if(maxi > evaluation(zones2,current_value2)):\r\n            print('break')\r\n            break\r\n        \r\n        #remove the color from the node's neighbors, skip if no solution is possible\r\n        for voisin in noeud.voisins:\r\n            if voisin.utilite.pop(couleur,None) == None:\r\n                break\r\n        \r\n        #recursion with the new parameters\r\n        utilite_branche, couleurs_branche = bb(zones2,maxi,current_value2,couleurs2)\r\n        \r\n        #store if better\r\n        if utilite_branche > maxi:\r\n            maxi = utilite_branche\r\n            couleurs = couleurs_branche\r\n        \r\n    return maxi,couleurs\r\ndebut=time.time()\r\na,b = bb(zones,maxi,maxi,couleurs)\r\nprint(\"Solution of utility {} and colors {}\".format(a,b))\r\nprint(time.time()-debut)\r\n\r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    ","repo_name":"ljp95/masters-assignments","sub_path":"Trees and graphs algorithms/csp - branch and bound.py","file_name":"csp - branch and bound.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"21270848918","text":"import matplotlib.pyplot as plt\nfrom gala_apples import total_gala_apples2019, total_gala_apples2018, total_gala_apples2017\n\n\n\ndef gala_apples():\n    #name = \"Gala Apple Sales Over 3 Years\"\n    print(total_gala_apples2017)\n    print(total_gala_apples2018)\n    print(total_gala_apples2019)\n\n    labels = '2019', '2018', '2017'\n    sizes = [total_gala_apples2019,total_gala_apples2018, total_gala_apples2017 ]\n    tot = sum(sizes)/100.0\n    fig1, ax1 = plt.subplots()\n    ax1.pie(sizes, labels=labels, autopct=lambda x: \"%d\" % round(x*tot), shadow=True,\n            startangle=90)\n    ax1.axis('equal')\n    plt.title(\"Total Gala Apples Sold Over 3 Years\")\n\n\n    plt.show()","repo_name":"nia-iott/NiaFruitSale","sub_path":"graph_gala_apples.py","file_name":"graph_gala_apples.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"72842226825","text":"# Import libraries\r\nimport pickle\r\nimport datetime\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport re\r\nfrom pylab import mpl\r\nmpl.rcParams['font.sans-serif'] = ['SimHei']  # set the default font\r\nmpl.rcParams['axes.unicode_minus'] = False  # fix minus signs '-' rendering as boxes in saved figures\r\nimport os\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n\r\nclass LBY_PositionInfo:\r\n    contract_code = ''\r\n    symbol_type = ''\r\n    avg_entry_price = 0\r\n    current_contracts = 0  # positions are split into long and short, so the size here is unsigned (an absolute value)\r\n    last_trading_day_close_price = 0\r\n\r\n    def __init__(self, ContractCode):\r\n        self.contract_code = ContractCode\r\n        self.symbol_type = ContractCode[:-4]\r\n        self.avg_entry_price = 0\r\n        self.current_contracts = 0  # positions are split into long and short, so the size here is unsigned (an absolute value)\r\n        self.last_trading_day_close_price = 0\r\n\r\n    def PrintInfo(self):\r\n        print(f\"PositionDetail: {self.contract_code}, {self.current_contracts}, {self.avg_entry_price}\")\r\n\r\n\r\nclass LBY_BackTester_QH:\r\n    ###### This backtest framework only supports deciding the rebalance logic after the afternoon close, then rebalancing at the specified times in the morning and afternoon of the next trading day ######\r\n    ###### Note: JoinQuant K-bar timestamps are the END time of the bar, not the time the bar is generated ######\r\n\r\n    ''' Strategy - parameter area '''\r\n    # parameter_symbol_type_list = ['RB', 'HC', 'I', 'J', 'JM', 'ZC', 'FG', 'MA', 'TA', 'L', 'PP', 'BU', 'RU', 'CU', 'AL', 'ZN', 'PB',\r\n    #                               'NI', 'SN', 'M', 'Y', 'RM', 'OI', 'P', 'A', 'C', 'CS', 'CF', 'SR', 'JD','EG','V']\r\n    parameter_length = {'RB': 3, 'CU': 4}\r\n    
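# Added usage sketch (editor's illustration; parameter values are hypothetical):\r\n    #   bt = LBY_BackTester_QH(start_cash=10000000, rebalance_times=['9:01'],\r\n    #                          backtest_begin_date='2015-01-01',\r\n    #                          backtest_end_date='2015-12-31',\r\n    #                          parameter_list=['RB', 'CU'])\r\n\r\n    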
''' Strategy - variable area '''\r\n    basis_price_df = {}\r\n    delivery_date_df = {}\r\n    # var_score = {}\r\n\r\n    def __init__(self, start_cash=10000000, rebalance_times=['9:01'], backtest_begin_date='2015-01-01',\r\n                 backtest_end_date='2015-12-31',parameter_list = []):\r\n        # print(\"This backtest framework only supports deciding the rebalance logic after the afternoon close, then rebalancing at specified times on the next trading day\\n\")\r\n        # print(\"Note: JoinQuant K-bar timestamps are the end time of the bar, not the time the bar is generated\\n\")\r\n        self.BackTestStartCash = start_cash\r\n        self.BackTestRebalanceTimeList = rebalance_times\r\n        self.BackTestBeginDate = backtest_begin_date\r\n        self.BackTestEndDate = backtest_end_date\r\n        self.TotalPortfolioCapital = start_cash\r\n        self.parameter_symbol_type_list = parameter_list\r\n        self.AllOrderRecordList_specific = {\r\n            'time': [],\r\n            'buy/sell': [],\r\n            'contract_code': [],\r\n            'order_lots': [],\r\n            'order_price': [],\r\n            'trading_fee': [],\r\n        }\r\n        self.var_score={}\r\n        for type in self.parameter_symbol_type_list:\r\n            self.var_score[type] = 0\r\n\r\n        # Instance variables\r\n        self.DailyTotalPortfolioCapital = {}  # stores the account's dynamic equity after each day's close\r\n        self.TotalTradingFees = 0  # cumulative trading costs\r\n        self.DailyTotalTradingFees = {}  # stores the cumulative trading costs after each day's close\r\n        self.AllOrderRecordList = []  # stores all trade records\r\n\r\n        self.CurrentPosition_L = {}  # real-time long futures positions; entries are LBY_PositionInfo instances\r\n        self.DailyPosition_L = {}  # long futures positions after each day's close; entries are LBY_PositionInfo instances\r\n        self.CurrentPosition_S = {}  # real-time short futures positions; entries are LBY_PositionInfo instances\r\n        self.DailyPosition_S = {}  # short futures positions after each day's close; entries are LBY_PositionInfo instances\r\n\r\n        self.TradingDaysCounter = 0  # which trading day it is, counted from the start of the backtest\r\n        self.AllDominantContractCodeDic = {}  # dominant-contract info for all dates\r\n        self.DominantContractCodeDic = {}  # dominant-contract info for the current day\r\n        self.CurrentYear_1mKbarInfoDic = {}  # all intraday K-bar info for the current year\r\n\r\n        self.order_info_columns = ('code', 'from_lots', 'to_lots')\r\n        self.order_info_long = pd.DataFrame(columns=self.order_info_columns)\r\n        self.order_info_short = pd.DataFrame(columns=self.order_info_columns)\r\n        self.intraday_1m_bars = {}  # panel format; even for a single contract I add 'CU8888.XSGE' to make it two contracts, so the result is always in panel format\r\n        self.traded_counter = 0  # number of trades already made today\r\n\r\n\r\n    # Shared class variables\r\n    ''' Backtest framework - parameter area '''\r\n    BackTestStartCash = 10000  # initial account capital\r\n    BackTestRebalanceTimeList = ['09:01']  # rebalance logic fires when the K-bar whose timestamp is in this list completes; each time has the format hh:mm, e.g. '09:15'\r\n    BackTestBeginDate = \"2010-01-01\"\r\n    BackTestEndDate = \"2020-01-10\"\r\n    BackTestFeeRatio = 1.0 / 10000\r\n    BackTestSlipTicks = 1\r\n\r\n\r\n\r\n    ''' Backtest framework - variable area '''\r\n    # starting_trade_date = {'PB': datetime.date(2014, 7, 25),\r\n    #                        'SN': datetime.date(2015, 12, 21),\r\n    #                        'HC': datetime.date(2015, 12, 21),\r\n    #                        'FU': datetime.date(2018, 7, 17),\r\n    #                        'BU': datetime.date(2015, 3, 2),\r\n    #                        'CS': datetime.date(2015, 9, 29),\r\n    #                        'V': datetime.date(2016, 9, 12),\r\n    #                        'J': datetime.date(2012, 6, 20),\r\n    #                        'ZC': datetime.date(2015, 11, 19),\r\n    #                        'MA': datetime.date(2014, 11, 19),\r\n    #                        'OI': datetime.date(2013, 6, 3)}\r\n    starting_trade_date={'BU':datetime.date(2014,9,16),\r\n                         'B':datetime.date(2018,3,1)}\r\n    SymbolMultiplier = {'RB': 10, 'HC': 10, 'I': 100, 'J': 100, 'JM': 60, 'ZC': 100, 'FG': 20, 'MA': 10, 'TA': 5,\r\n                        'L': 5, 'PP': 5, 'BU': 10, 'RU': 10, 'CU': 5, 'AL': 5, 'ZN': 5, 'PB': 5, 'NI': 1, 'SN': 1,\r\n                        'M': 10, 'Y': 10, 'RM': 10, 'OI': 10, 'P': 10, 'A': 10, 'C': 10, 'CS': 10, 'CF': 5, 'SR': 10,\r\n                        'JD': 10, 'V': 5, 'EG': 10, 'AG': 15, 'AU': 1000, 'SC': 1000, 'B': 10, 'IF': 300, 'IH': 300,\r\n                        'IC': 200, 'FU': 10, 'SA': 20, 'SS': 5, 'EB': 5, 'SP': 10, 'UR': 20, 'AP': 10, 'SM': 5, 'SF': 5,\r\n                        'PG': 20, 'RI': 20, 'WH': 20, 'CJ': 5, 'LH': 16, 'NR': 10, 'PK': 5, 'PF': 5, }\r\n    SymbolMinMovePoint = {'RB': 1, 'HC': 1, 'I': 0.5, 'J': 0.5, 'JM': 0.5, 'ZC': 0.2, 'FG': 1, 'MA': 1, 'TA': 2, 'L': 5,\r\n                          'PP': 1, 'BU': 2, 'RU': 5, 'CU': 10, 'AL': 5, 'ZN': 5, 'PB': 5, 'NI': 10, 'SN': 10, 'M': 1,\r\n                          'Y': 2, 'RM': 1, 'OI': 1, 'P': 2, 'A': 1, 
'C': 1, 'CS': 1, 'CF': 5, 'SR': 1, 'JD': 1, 'V': 5,\r\n                          'EG': 1, 'AG': 1, 'AU': 0.02,\r\n                          'SC': 0.01, 'B': 1, 'IF': 0.2, 'IH': 0.2, 'IC': 0.2, 'FU': 1, 'SA': 1, 'SS': 5, 'EB': 1,\r\n                          'SP': 2, 'UR': 1, 'AP': 1, 'SM': 2, 'SF': 2, 'PG': 1,'RI':1,'WH': 1, 'CJ': 5, 'LH': 5, 'NR': 5, 'PK': 2, 'PF': 2,}\r\n    ''' Function area '''\r\n\r\n    def reset_order_info(self):\r\n        if len(self.order_info_long) > 0:\r\n            # self.order_info_long = pd.DataFrame(columns=self.order_info_columns)\r\n            self.order_info_long.drop(self.order_info_long.index, inplace=True)\r\n        if len(self.order_info_short) > 0:\r\n            # self.order_info_short = pd.DataFrame(columns=self.order_info_columns)\r\n            self.order_info_short.drop(self.order_info_short.index, inplace=True)\r\n\r\n    def compare_starting_trade_date(self, symbol, current_dt):\r\n        if symbol not in self.starting_trade_date.keys():\r\n            return True\r\n        else:\r\n            trade_day = self.starting_trade_date[symbol]\r\n            delta = datetime.date(current_dt.year, current_dt.month, current_dt.day) - trade_day\r\n            if delta.days > 0:\r\n                return True\r\n            else:\r\n                return False\r\n\r\n    def LBY_PrepareData(self):\r\n        ## Read the trading signal\r\n        signal = pd.read_csv(\"signal/水位rank_01_极值.csv\", index_col=0)\r\n        self.signal = signal\r\n\r\n\r\n    def LBY_BeforeTradingDayBegin(self, current_dt):\r\n        self.TradingDaysCounter = self.TradingDaysCounter + 1\r\n\r\n\r\n    def During_Tradingday(self,current_dt):\r\n        self.LBY_GenerateOrders(current_dt)\r\n\r\n    def LBY_AfterTradingDayEnd(self, current_dt):\r\n        my_time = datetime.datetime(current_dt.year, current_dt.month, current_dt.day, 15, 0, 0)\r\n        signal_bar = self.signal[self.signal.index == str(current_dt)[:10]]\r\n        if len(signal_bar) == 0:\r\n            for symbol in self.parameter_symbol_type_list:\r\n                self.var_score[symbol] = np.nan\r\n        else:\r\n            dominant_bar = pd.DataFrame(self.DominantContractCodeDic, index=['dominant_code'])\r\n            total = pd.concat([signal_bar, dominant_bar], axis=0)\r\n            total.dropna(inplace=True, axis=1)\r\n\r\n            if len(total.columns) < 1:\r\n                for symbol in self.parameter_symbol_type_list:\r\n                    self.var_score[symbol] = 0\r\n            else:\r\n                total = total.iloc[[0]]\r\n                long_number = total[total > 0].sum(axis=1)[0]\r\n                short_number = total[total < 0].sum(axis=1)[0]\r\n                total_number = long_number + abs(short_number)\r\n\r\n                for symbol in self.parameter_symbol_type_list:\r\n                    if (not self.compare_starting_trade_date(symbol, current_dt)) or (\r\n                            symbol not in self.DominantContractCodeDic.keys()):\r\n                        continue\r\n                    else:\r\n                        if signal_bar[symbol][0] == 1:\r\n                            code = self.DominantContractCodeDic[symbol]\r\n                            price = self.intraday_1m_bars.loc[my_time, code]\r\n                            if self.var_score.get(symbol) > 0:\r\n                                continue\r\n                            else:\r\n                                self.var_score[symbol] = int(\r\n                                    self.BackTestStartCash / (total_number * price * self.SymbolMultiplier[code[:-4]]))\r\n\r\n                        elif signal_bar[symbol][0] == -1:\r\n                            code = self.DominantContractCodeDic[symbol]\r\n                            price = self.intraday_1m_bars.loc[my_time, code]\r\n\r\n                            if self.var_score.get(symbol) < 0:\r\n                                continue\r\n                            else:\r\n                                self.var_score[symbol] = -int(self.BackTestStartCash / (\r\n                                        total_number * price * self.SymbolMultiplier[code[:-4]]))\r\n\r\n                        else:\r\n                            self.var_score[symbol] = signal_bar[symbol][0]\r\n        self.LBY_GenerateOrders(current_dt)\r\n\r\n\r\n\r\n        # Core strategy logic begins\r\n        # self.LBY_GenerateOrders(current_dt)\r\n\r\n\r\n        #\r\n        # for symbol in self.parameter_symbol_type_list:\r\n        #     # mylength = self.parameter_length[symbol]\r\n        #     # mylength = 5\r\n        #     # remainder = self.TradingDaysCounter % (mylength * 2)\r\n        #\r\n        #     if len(temp)==0 or number<5:\r\n        #         self.var_score[symbol] = np.nan\r\n        #     else:\r\n        #         if temp[symbol][0]==1:\r\n        #             long_number+=1\r\n        #         elif temp[symbol][0]==-1:\r\n        #             short_number+=1\r\n        #         self.var_score[symbol] = temp[symbol][0]\r\n        #     self.var_score[symbol] = 
remainder - mylength\r\n        # print(self.var_score)\r\n        # Core strategy logic ends\r\n\r\n\r\n    def LBY_GenerateOrders(self, current_dt):\r\n        self.reset_order_info()\r\n        my_time = datetime.datetime(current_dt.year, current_dt.month, current_dt.day, 9, 1, 0)\r\n\r\n        # Map each held symbol type to its contract code\r\n        symbol_contract_map_L = {}\r\n        symbol_contract_map_S = {}\r\n        current_position_code_list = list(self.CurrentPosition_L.keys())\r\n        for code in current_position_code_list:\r\n            symbol_contract_map_L[code[:-4]] = code\r\n        current_position_code_list = list(self.CurrentPosition_S.keys())\r\n        for code in current_position_code_list:\r\n            symbol_contract_map_S[code[:-4]] = code\r\n\r\n        for symbol in self.parameter_symbol_type_list:\r\n            if (not self.compare_starting_trade_date(symbol, current_dt)) or (\r\n                    symbol not in self.DominantContractCodeDic.keys()):\r\n                continue\r\n            myscore = self.var_score[symbol]\r\n            main_contract_code = self.DominantContractCodeDic[symbol]\r\n            # main_contract_price = self.intraday_1m_bars['close'].loc[my_time, main_contract_code]\r\n\r\n            # main_contract_price = self.intraday_1m_bars.loc[my_time, main_contract_code]\r\n\r\n            target_lots = myscore\r\n            # Get the current position info\r\n            current_position_code_L = ''\r\n            current_position_lots_L = 0\r\n            current_position_code_S = ''\r\n            current_position_lots_S = 0\r\n            current_position_flag = 0\r\n            if symbol in symbol_contract_map_L.keys():\r\n                current_position_code_L = symbol_contract_map_L[symbol]\r\n                current_position_lots_L = self.CurrentPosition_L[current_position_code_L].current_contracts\r\n            if symbol in symbol_contract_map_S.keys():\r\n                current_position_code_S = symbol_contract_map_S[symbol]\r\n                current_position_lots_S = self.CurrentPosition_S[current_position_code_S].current_contracts\r\n            if current_position_lots_L > 0:\r\n                current_position_flag = 1\r\n            elif current_position_lots_S > 0:\r\n                current_position_flag = -1\r\n\r\n            # Compute the rebalancing orders\r\n            if current_position_flag == 0:\r\n                # currently no position\r\n                if target_lots > 0:\r\n                    # open a new long\r\n                    self.order_info_long = self.order_info_long.append([{self.order_info_columns[0]: main_contract_code,\r\n                                                                         self.order_info_columns[1]: 0,\r\n                                                                         self.order_info_columns[2]: target_lots}],\r\n                                                                       ignore_index=True)\r\n                elif target_lots < 0:\r\n                    # open a new short\r\n                    self.order_info_short = self.order_info_short.append([{self.order_info_columns[\r\n                                                                               0]: main_contract_code,\r\n                                                                           self.order_info_columns[1]: 0,\r\n                                                                           self.order_info_columns[2]: abs(\r\n                                                                               target_lots)}], ignore_index=True)\r\n            elif current_position_flag == 1:\r\n                # currently holding a long position\r\n                if current_position_lots_L == target_lots:\r\n                    # if the new and old positions are the same, only check whether we need to roll to the next contract month\r\n                    if main_contract_code != current_position_code_L:\r\n                        self.order_info_long = self.order_info_long.append([{self.order_info_columns[0]: current_position_code_L,\r\n                                                                             self.order_info_columns[1]: current_position_lots_L,\r\n                                                                             self.order_info_columns[2]: 0}],\r\n                                                                           ignore_index=True)\r\n                        self.order_info_long = self.order_info_long.append([{self.order_info_columns[0]: main_contract_code,\r\n                                                                             self.order_info_columns[1]: 0,\r\n                                                                             self.order_info_columns[2]: current_position_lots_L}],\r\n                                                                           ignore_index=True)\r\n                else:  # the new and old positions differ\r\n                    if main_contract_code != current_position_code_L:\r\n                        # need to roll: close the old position first\r\n                        self.order_info_long = self.order_info_long.append([{self.order_info_columns[\r\n                                                                                 0]: current_position_code_L,\r\n                                                                             self.order_info_columns[\r\n                                                                                 1]: current_position_lots_L,\r\n                                                                             self.order_info_columns[2]: 0}],\r\n                                                                           ignore_index=True)\r\n                        if target_lots > 0:\r\n                            self.order_info_long = self.order_info_long.append([{self.order_info_columns[\r\n                                                                                     0]: main_contract_code,\r\n                                                                                 self.order_info_columns[1]: 0,\r\n                                                                                 self.order_info_columns[\r\n                                                                                     2]: target_lots}],\r\n                                                                               ignore_index=True)\r\n                        elif target_lots < 0:\r\n                            self.order_info_short = self.order_info_short.append([{self.order_info_columns[\r\n                                                                                       
0]: main_contract_code,\n self.order_info_columns[1]: 0,\n self.order_info_columns[2]: abs(\n target_lots)}],\n ignore_index=True)\n else:\n # 不需要移仓,只需要同合约调仓\n if target_lots >= 0:\n self.order_info_long = self.order_info_long.append([{self.order_info_columns[\n 0]: main_contract_code,\n self.order_info_columns[\n 1]: current_position_lots_L,\n self.order_info_columns[\n 2]: target_lots}],\n ignore_index=True)\n else:\n # 先把多头持仓全部平掉,再开空\n self.order_info_long = self.order_info_long.append([{self.order_info_columns[\n 0]: main_contract_code,\n self.order_info_columns[\n 1]: current_position_lots_L,\n self.order_info_columns[2]: 0}],\n ignore_index=True)\n self.order_info_short = self.order_info_short.append([{self.order_info_columns[\n 0]: main_contract_code,\n self.order_info_columns[1]: 0,\n self.order_info_columns[2]: abs(\n target_lots)}],\n ignore_index=True)\n elif current_position_flag == -1:\n # 当前持有空头头寸\n if -current_position_lots_S == target_lots:\n # 新老仓位一样的话,只需要判断是否需要移仓换月\n if main_contract_code != current_position_code_S:\n self.order_info_short = self.order_info_short.append([{self.order_info_columns[\n 0]: current_position_code_S,\n self.order_info_columns[\n 1]: current_position_lots_S,\n self.order_info_columns[2]: 0}],\n ignore_index=True)\n self.order_info_short = self.order_info_short.append([{self.order_info_columns[\n 0]: main_contract_code,\n self.order_info_columns[1]: 0,\n self.order_info_columns[\n 2]: current_position_lots_S}],\n ignore_index=True)\n else: # 新老仓位不一样\n if main_contract_code != current_position_code_S:\n # 需要移仓,先把老仓位平掉\n self.order_info_short = self.order_info_short.append([{self.order_info_columns[\n 0]: current_position_code_S,\n self.order_info_columns[\n 1]: current_position_lots_S,\n self.order_info_columns[2]: 0}],\n ignore_index=True)\n if target_lots < 0:\n self.order_info_short = self.order_info_short.append([{self.order_info_columns[\n 0]: main_contract_code,\n self.order_info_columns[1]: 0,\n self.order_info_columns[2]: abs(\n target_lots)}],\n ignore_index=True)\n elif target_lots > 0:\n self.order_info_long = self.order_info_long.append([{self.order_info_columns[\n 0]: main_contract_code,\n self.order_info_columns[1]: 0,\n self.order_info_columns[2]: abs(\n target_lots)}], ignore_index=True)\n else:\n # 不需要移仓,只需要同合约调仓\n if target_lots <= 0:\n self.order_info_short = self.order_info_short.append([{self.order_info_columns[\n 0]: main_contract_code,\n self.order_info_columns[\n 1]: current_position_lots_S,\n self.order_info_columns[2]: abs(\n target_lots)}],\n ignore_index=True)\n else:\n # 先把空头持仓全部平掉,再开多\n self.order_info_short = self.order_info_short.append([{self.order_info_columns[\n 0]: main_contract_code,\n self.order_info_columns[\n 1]: current_position_lots_S,\n self.order_info_columns[2]: 0}],\n ignore_index=True)\n self.order_info_long = self.order_info_long.append([{self.order_info_columns[\n 0]: main_contract_code,\n self.order_info_columns[1]: 0,\n self.order_info_columns[2]: abs(\n target_lots)}], ignore_index=True)\n\n def LBY_ClearingAfterTradingDayEnd(self, current_dt):\n # print(self.TotalPortfolioCapital)\n # 公共模块开始,以下代码不要改动\n self.traded_counter = 0 # 当天已交易次数重置为0\n # 根据收盘价,结算各头寸当日持有盈亏,并更新各变量\n my_time = datetime.datetime(current_dt.year, current_dt.month, current_dt.day, 15, 0, 0)\n # 多头持仓\n # print('long position:')\n position_key_list = list(self.CurrentPosition_L.keys())\n for position_key in position_key_list:\n position_info = self.CurrentPosition_L[position_key]\n # position_info.PrintInfo()\n if 
position_info.current_contracts == 0:\n continue\n # today_close_price = self.intraday_1m_bars['close'].loc[my_time, position_key]\n\n today_close_price = self.intraday_1m_bars.loc[my_time, position_key]\n\n # 记录距离昨收价的平仓盈亏\n daily_profit = position_info.current_contracts * (\n today_close_price - position_info.last_trading_day_close_price) * self.SymbolMultiplier[\n position_info.symbol_type]\n # 更新最新权益\n self.TotalPortfolioCapital = self.TotalPortfolioCapital + daily_profit\n position_info.last_trading_day_close_price = today_close_price\n self.CurrentPosition_L[position_key] = position_info\n self.DailyPosition_L[current_dt] = self.CurrentPosition_L.copy()\n # print(self.TotalPortfolioCapital)\n # 空头持仓\n # print('short position:')\n position_key_list = list(self.CurrentPosition_S.keys())\n for position_key in position_key_list:\n position_info = self.CurrentPosition_S[position_key]\n # position_info.PrintInfo()\n if position_info.current_contracts == 0:\n continue\n # today_close_price = self.intraday_1m_bars['close'].loc[my_time, position_key]\n\n\n today_close_price = self.intraday_1m_bars.loc[my_time, position_key]\n\n\n\n\n\n # 记录距离昨收价的平仓盈亏\n daily_profit = (0 - position_info.current_contracts) * (\n today_close_price - position_info.last_trading_day_close_price) * self.SymbolMultiplier[\n position_info.symbol_type]\n # 更新最新权益\n self.TotalPortfolioCapital = self.TotalPortfolioCapital + daily_profit\n position_info.last_trading_day_close_price = today_close_price\n self.CurrentPosition_S[position_key] = position_info\n self.DailyPosition_S[current_dt] = self.CurrentPosition_S.copy()\n # 将收盘后的动态权益和累计费用记入历史\n self.DailyTotalPortfolioCapital[current_dt] = self.TotalPortfolioCapital\n self.DailyTotalTradingFees[current_dt] = self.TotalTradingFees\n # 公共模块结束,以上代码不要改动\n # print(self.TotalPortfolioCapital)\n\n def LBY_Buy(self, OrderTime, ContractCode, OrderLots, OrderPrice, MarketType='QH'):\n if OrderLots <= 0:\n print('error: can not Buy')\n return\n self.AllOrderRecordList.append(f\"{OrderTime}, Buy, {ContractCode}, {OrderLots}, {OrderPrice}\")\n\n # print(self.AllOrderRecordList[-1])\n position_info = self.CurrentPosition_L.get(ContractCode)\n if position_info.contract_code == '':\n position_info.contract_code = ContractCode\n position_info.symbol_type = ContractCode[:-4]\n # 记录本次交易的费用\n trading_fee = OrderLots * (OrderPrice * self.SymbolMultiplier[position_info.symbol_type] * self.BackTestFeeRatio +\n self.BackTestSlipTicks * self.SymbolMinMovePoint[position_info.symbol_type] * self.SymbolMultiplier[position_info.symbol_type])\n\n # 更新最新权益\n self.TotalPortfolioCapital = self.TotalPortfolioCapital - trading_fee\n # 更新最新总费用\n self.TotalTradingFees = self.TotalTradingFees + trading_fee\n # 更新持仓的其他属性信息\n position_info.avg_entry_price = (position_info.avg_entry_price * position_info.current_contracts + OrderPrice * OrderLots) / (\n position_info.current_contracts + OrderLots)\n position_info.last_trading_day_close_price = (\n position_info.last_trading_day_close_price * position_info.current_contracts + OrderPrice * OrderLots) / (\n position_info.current_contracts + OrderLots)\n position_info.current_contracts = position_info.current_contracts + OrderLots\n self.CurrentPosition_L[ContractCode] = position_info\n\n #记录详细的order信息\n self.AllOrderRecordList_specific['time'].append(OrderTime)\n self.AllOrderRecordList_specific['buy/sell'].append('买')\n self.AllOrderRecordList_specific['contract_code'].append(ContractCode)\n self.AllOrderRecordList_specific['order_lots'].append(OrderLots)\n 
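        # descriptive note: these parallel lists form a column-oriented fill log;
        # Trading_Analysis later wraps them in a pandas DataFrame and exports order_log.xlsx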
self.AllOrderRecordList_specific['order_price'].append(OrderPrice)\n self.AllOrderRecordList_specific['trading_fee'].append(trading_fee)\n\n\n # position_info.PrintInfo()\n # print(self.TotalPortfolioCapital)\n\n def LBY_BuyToCover(self, OrderTime, ContractCode, OrderLots, OrderPrice, MarketType='QH'):\n if OrderLots <= 0:\n print('error: can not BuyToCover')\n return\n self.AllOrderRecordList.append(f\"{OrderTime}, BuyToCover, {ContractCode}, {OrderLots}, {OrderPrice}\")\n # print(self.AllOrderRecordList[-1])\n position_info = self.CurrentPosition_S.get(ContractCode)\n if position_info is None or position_info.contract_code == '' or position_info.current_contracts == 0:\n print(f\"{ContractCode}无空头持仓,无法平仓\\n\")\n else:\n # 买入平仓,不仅要计算交易费用,还要计算距离昨收价的平仓盈亏\n # 记录本次交易的费用\n trading_fee = OrderLots * (OrderPrice * self.SymbolMultiplier[\n position_info.symbol_type] * self.BackTestFeeRatio + self.BackTestSlipTicks * self.SymbolMinMovePoint[\n position_info.symbol_type] * self.SymbolMultiplier[\n position_info.symbol_type])\n # 记录距离昨收价的平仓盈亏\n daily_profit = (0 - OrderLots) * (OrderPrice - position_info.last_trading_day_close_price) * \\\n self.SymbolMultiplier[position_info.symbol_type]\n # 更新最新权益\n self.TotalPortfolioCapital = self.TotalPortfolioCapital - trading_fee + daily_profit\n # 更新最新总费用\n self.TotalTradingFees = self.TotalTradingFees + trading_fee\n # 平仓不用改变平均持仓价及昨收价,只需要更新持仓头寸就行\n position_info.current_contracts = position_info.current_contracts - OrderLots\n # self.CurrentPosition_S[ContractCode] = position_info\n if position_info.current_contracts == 0:\n self.CurrentPosition_S.pop(ContractCode)\n else:\n self.CurrentPosition_S[ContractCode] = position_info\n # print(self.TotalPortfolioCapital)\n self.AllOrderRecordList_specific['time'].append(OrderTime)\n self.AllOrderRecordList_specific['buy/sell'].append('买平')\n self.AllOrderRecordList_specific['contract_code'].append(ContractCode)\n self.AllOrderRecordList_specific['order_lots'].append(OrderLots)\n self.AllOrderRecordList_specific['order_price'].append(OrderPrice)\n self.AllOrderRecordList_specific['trading_fee'].append(trading_fee)\n\n def LBY_SellShort(self, OrderTime, ContractCode, OrderLots, OrderPrice, MarketType='QH'):\n if OrderLots <= 0:\n print('error: can not SellShort')\n return\n self.AllOrderRecordList.append(f\"{OrderTime}, SellShort, {ContractCode}, {OrderLots}, {OrderPrice}\")\n # print(self.AllOrderRecordList[-1])\n position_info = self.CurrentPosition_S.get(ContractCode)\n if position_info.contract_code == '':\n position_info.contract_code = ContractCode\n position_info.symbol_type = ContractCode[:-4]\n # 记录本次交易的费用\n trading_fee = OrderLots * (OrderPrice * self.SymbolMultiplier[\n position_info.symbol_type] * self.BackTestFeeRatio + self.BackTestSlipTicks * self.SymbolMinMovePoint[\n position_info.symbol_type] * self.SymbolMultiplier[position_info.symbol_type])\n # 更新最新权益\n self.TotalPortfolioCapital = self.TotalPortfolioCapital - trading_fee\n # 更新最新总费用\n self.TotalTradingFees = self.TotalTradingFees + trading_fee\n # 更新持仓的其他属性信息\n position_info.avg_entry_price = (\n position_info.avg_entry_price * position_info.current_contracts + OrderPrice * OrderLots) / (\n position_info.current_contracts + OrderLots)\n position_info.last_trading_day_close_price = (\n position_info.last_trading_day_close_price * position_info.current_contracts + OrderPrice * OrderLots) / (\n position_info.current_contracts + OrderLots)\n position_info.current_contracts = position_info.current_contracts + OrderLots\n 
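        # descriptive note: avg_entry_price and last_trading_day_close_price above are
        # lots-weighted running averages, e.g. (hypothetical numbers) 3 lots already short
        # at 100 plus 1 new lot at 104 gives (3*100 + 1*104) / 4 = 101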
self.CurrentPosition_S[ContractCode] = position_info\n # position_info.PrintInfo()\n # print(self.TotalPortfolioCapital)\n self.AllOrderRecordList_specific['time'].append(OrderTime)\n self.AllOrderRecordList_specific['buy/sell'].append('卖')\n self.AllOrderRecordList_specific['contract_code'].append(ContractCode)\n self.AllOrderRecordList_specific['order_lots'].append(OrderLots)\n self.AllOrderRecordList_specific['order_price'].append(OrderPrice)\n self.AllOrderRecordList_specific['trading_fee'].append(trading_fee)\n\n def LBY_Sell(self, OrderTime, ContractCode, OrderLots, OrderPrice, MarketType='QH'):\n if OrderLots <= 0:\n print('error: can not Sell')\n return\n self.AllOrderRecordList.append(f\"{OrderTime}, Sell, {ContractCode}, {OrderLots}, {OrderPrice}\")\n # print(self.AllOrderRecordList[-1])\n position_info = self.CurrentPosition_L.get(ContractCode)\n if position_info is None or position_info.contract_code == '' or position_info.current_contracts == 0:\n print(f\"{ContractCode}无多头持仓,无法平仓\\n\")\n else:\n # 卖出平仓,不仅要计算交易费用,还要计算距离昨收价的平仓盈亏\n # 记录本次交易的费用\n trading_fee = OrderLots * (OrderPrice * self.SymbolMultiplier[\n position_info.symbol_type] * self.BackTestFeeRatio + self.BackTestSlipTicks * self.SymbolMinMovePoint[\n position_info.symbol_type] * self.SymbolMultiplier[\n position_info.symbol_type])\n # 记录距离昨收价的平仓盈亏\n daily_profit = OrderLots * (OrderPrice - position_info.last_trading_day_close_price) * \\\n self.SymbolMultiplier[position_info.symbol_type]\n # 更新最新权益\n self.TotalPortfolioCapital = self.TotalPortfolioCapital - trading_fee + daily_profit\n # 更新最新总费用\n self.TotalTradingFees = self.TotalTradingFees + trading_fee\n # 平仓不用改变平均持仓价及昨收价,只需要更新持仓头寸就行\n position_info.current_contracts = position_info.current_contracts - OrderLots\n # self.CurrentPosition_L[ContractCode] = position_info\n if position_info.current_contracts == 0:\n self.CurrentPosition_L.pop(ContractCode)\n else:\n self.CurrentPosition_L[ContractCode] = position_info\n # print(self.TotalPortfolioCapital)\n self.AllOrderRecordList_specific['time'].append(OrderTime)\n self.AllOrderRecordList_specific['buy/sell'].append('卖平')\n self.AllOrderRecordList_specific['contract_code'].append(ContractCode)\n self.AllOrderRecordList_specific['order_lots'].append(OrderLots)\n self.AllOrderRecordList_specific['order_price'].append(OrderPrice)\n self.AllOrderRecordList_specific['trading_fee'].append(trading_fee)\n\n # ToLots: 目标仓位,绝对值. 
Side: 持仓方向,long/short\n def LBY_OrderToTarget(self, OrderTime, ContractCode, ToLots, OrderPrice, Side='long', MarketType='QH'):\n # print(f\"LBY_OrderToTarget {OrderTime}, {ContractCode}, {ToLots}, {OrderPrice}, {Side}\")\n if Side == 'long':\n position_info = self.CurrentPosition_L.get(ContractCode)\n if position_info is None:\n if ToLots > 0:\n position_info = LBY_PositionInfo(ContractCode)\n self.CurrentPosition_L[ContractCode] = position_info\n # 全新开仓\n order_lots = abs(ToLots - position_info.current_contracts)\n self.LBY_Buy(OrderTime, ContractCode, order_lots, OrderPrice, MarketType='QH')\n else:\n # 已有持仓信息\n if ToLots > position_info.current_contracts:\n # 加仓\n order_lots = abs(ToLots - position_info.current_contracts)\n self.LBY_Buy(OrderTime, ContractCode, order_lots, OrderPrice, MarketType='QH')\n elif ToLots < position_info.current_contracts:\n # 减仓\n order_lots = abs(ToLots - position_info.current_contracts)\n self.LBY_Sell(OrderTime, ContractCode, order_lots, OrderPrice, MarketType='QH')\n\n elif Side == 'short':\n position_info = self.CurrentPosition_S.get(ContractCode)\n if position_info is None:\n if ToLots > 0:\n position_info = LBY_PositionInfo(ContractCode)\n self.CurrentPosition_S[ContractCode] = position_info\n # 全新开仓\n order_lots = abs(ToLots - position_info.current_contracts)\n self.LBY_SellShort(OrderTime, ContractCode, order_lots, OrderPrice, MarketType='QH')\n else:\n # 已有持仓信息\n if ToLots > position_info.current_contracts:\n # 加仓\n order_lots = abs(ToLots - position_info.current_contracts)\n self.LBY_SellShort(OrderTime, ContractCode, order_lots, OrderPrice, MarketType='QH')\n elif ToLots < position_info.current_contracts:\n # 减仓\n order_lots = abs(ToLots - position_info.current_contracts)\n self.LBY_BuyToCover(OrderTime, ContractCode, order_lots, OrderPrice, MarketType='QH')\n\n # 调仓\n def LBY_RebalancePosition(self, current_dt):\n if len(self.intraday_1m_bars) > 0:\n # bar_times = list(self.intraday_1m_bars['open'].index)\n for rebalance_time_str in self.BackTestRebalanceTimeList:\n strs = rebalance_time_str.split(\":\", 1)\n rebalance_time = datetime.datetime(current_dt.year, current_dt.month, current_dt.day, int(strs[0]),\n int(strs[1]), 0)\n self.traded_counter = self.traded_counter + 1\n for index, row in self.order_info_long.iterrows():\n contract_code = row[self.order_info_columns[0]]\n from_lots = row[self.order_info_columns[1]]\n to_lots = row[self.order_info_columns[2]]\n each_lots = math.ceil(abs(to_lots - from_lots) / len(self.BackTestRebalanceTimeList))\n each_lots = max(1, each_lots)\n\n\n order_price = self.intraday_1m_bars.loc[rebalance_time, contract_code]\n\n\n\n if from_lots > to_lots:\n # 减仓\n self.LBY_OrderToTarget(rebalance_time, contract_code,\n max(to_lots, from_lots - each_lots * self.traded_counter), order_price,\n Side='long')\n elif from_lots < to_lots:\n # 加仓\n self.LBY_OrderToTarget(rebalance_time, contract_code,\n min(to_lots, from_lots + each_lots * self.traded_counter), order_price,\n Side='long')\n for index, row in self.order_info_short.iterrows():\n contract_code = row[self.order_info_columns[0]]\n from_lots = row[self.order_info_columns[1]]\n to_lots = row[self.order_info_columns[2]]\n each_lots = math.ceil(abs(to_lots - from_lots) / len(self.BackTestRebalanceTimeList))\n each_lots = max(1, each_lots)\n # order_price = self.intraday_1m_bars['close'].loc[rebalance_time, contract_code]\n\n order_price = self.intraday_1m_bars.loc[rebalance_time, contract_code]\n\n if from_lots > to_lots:\n\n # 减仓\n 
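                        # pacing example (hypothetical numbers): from_lots=10, to_lots=0 with
                        # 4 rebalance times gives each_lots = ceil(10/4) = 3, so the target
                        # position steps 7 -> 4 -> 1 -> 0 across successive rebalance calls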
self.LBY_OrderToTarget(rebalance_time, contract_code,\n max(to_lots, from_lots - each_lots * self.traded_counter), order_price,\n Side='short')\n elif from_lots < to_lots:\n # 加仓\n self.LBY_OrderToTarget(rebalance_time, contract_code,\n min(to_lots, from_lots + each_lots * self.traded_counter), order_price,\n Side='short')\n\n # 增加后续分析结果生成的部分\n def Trading_Analysis(self):\n if not os.path.exists(\"type_res\"):\n os.makedirs(\"type_res\")\n\n file_name = str(datetime.datetime.now())[:19]\n file_name = re.sub(u\"([^\\u4e00-\\u9fa5\\u0030-\\u0039\\u0041-\\u005a\\u0061-\\u007a])\", \"\", file_name)\n file_path = \"type_res/\"+file_name+self.parameter_symbol_type_list[0]\n\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n # 交易记录\n order_log = self.AllOrderRecordList_specific\n order_log = pd.DataFrame(order_log)\n order_log.to_excel(file_path+\"/order_log.xlsx\")\n start_time = pd.to_datetime(str(order_log['time'][0])[:10])\n\n # 净值计算\n PNL = self.DailyTotalPortfolioCapital\n PNL = pd.DataFrame(PNL, index=['净值'])\n PNL = PNL.T\n PNL = PNL[PNL.index>=start_time]\n PNL['净值'] = PNL['净值'].apply(lambda x: x / self.BackTestStartCash)\n PNL['回撤'] = PNL['净值']-PNL['净值'].cummax()\n fig = plt.figure(figsize=(32,20))\n ax1 = plt.subplot(111)\n PNL['净值'].plot(color='b')\n plt.xticks(fontsize=20)\n plt.yticks(fontsize=20)\n plt.legend(loc='upper left',fontsize=20)\n ax2 = plt.twinx()\n PNL['回撤'].plot(color='y',alpha=0.5)\n plt.yticks(fontsize=20)\n plt.legend(fontsize=20)\n plt.savefig(file_path+\"/PNL.png\")\n\n # 交易结果的指标计算\n during_date = (pd.to_datetime(self.BackTestEndDate)-start_time).days\n during_year = during_date / 365.0\n res = {}\n res['收益%'] = (PNL['净值'][-1]-1)*100\n res['年化收益%'] = res['收益%']/during_year\n res['最大回撤%'] = abs(PNL['回撤'].min())*100\n res['收益回撤比'] = res['收益%']/res['最大回撤%']\n res = pd.DataFrame(res,index=['回测统计'])\n res.to_excel(file_path+\"/result_analysis.xlsx\")\n PNL.to_excel(file_path+\"/PNL.xlsx\")\n\n def LBY_DoBackTest(self):\n # print('Working...\\n')\n ''' 预加载平台数据开始 如果没数据,请重新正确运行名为00的notebook '''\n file_path_dominant_contract_code = 'C:/Project/FuturesBackTester/pickle/dominant_contract_code.pickle'\n f = open(file_path_dominant_contract_code, 'rb')\n self.AllDominantContractCodeDic = pickle.load(f)\n f.close()\n ''' 预加载平台数据结束 '''\n\n self.reset_order_info()\n self.LBY_PrepareData()\n\n # 获取交易日列表\n # now_time = datetime.datetime.now()\n # rqdatac.init()\n # HS300 = get_price('000001.XSHE', start_date='2010-01-04',\n # end_date=datetime.datetime.strftime(now_time, \"%Y-%m-%d\"), fields='close')\n HS300 = self.HS300\n\n HS300.index = pd.to_datetime(HS300.index)\n\n\n self.trading_day = HS300\n self.trading_day = self.trading_day[(self.trading_day.index>=self.BackTestBeginDate)&(self.trading_day.index<=self.BackTestEndDate)]\n trading_date_list = list(self.trading_day.index)\n current_year = 0\n\n for trading_date in trading_date_list:\n # print(f\"{trading_date}\")\n # 获取当日(白天,不做夜盘)涉及到的所有合约的1分钟K线信息\n\n self.DominantContractCodeDic = self.AllDominantContractCodeDic[trading_date]\n if current_year == 0 or current_year != trading_date.year:\n print(f\"dealing with {trading_date.year}\")\n current_year = trading_date.year\n f = open(f\"C:/Project/FuturesBackTester/pickle/1m_kbars_{current_year}.pickle\", 'rb')\n self.CurrentYear_1mKbarInfoDic = pickle.load(f)\n f.close()\n\n self.intraday_1m_bars = self.CurrentYear_1mKbarInfoDic[trading_date]\n # if trading_date.year==2019 and trading_date.month==4 and trading_date.day==3:\n # 
print(self.intraday_1m_bars['close'].columns)\n '''\n contract_code_list = list(self.order_info_long[self.order_info_columns[0]])+list(self.order_info_short[self.order_info_columns[0]])+list(self.CurrentPosition_L.keys())+list(self.CurrentPosition_S.keys())\n for symbol_type in self.parameter_symbol_type_list: #这里可以优化,可以提前存储每日主力合约\n contrace_code = get_dominant_future(symbol_type, trading_date)\n self.DominantContractCodeDic[symbol_type] = contrace_code\n contract_code_list = contract_code_list + [contrace_code]\n contract_code_list = list(set(contract_code_list))\n\n if len(contract_code_list)>0:\n if len(contract_code_list) ==1:\n contract_code_list = contract_code_list + ['CU8888.XSGE']\n self.intraday_1m_bars = get_price(contract_code_list, start_date=datetime.datetime(trading_date.year,trading_date.month,trading_date.day, 8, 0, 0), end_date=datetime.datetime(trading_date.year,trading_date.month,trading_date.day, 15, 15, 0), fields=['open', 'close', 'high', 'low', 'volume'], frequency='1m')\n else:\n self.intraday_1m_bars = {}\n '''\n # print(self.intraday_1m_bars['close'])\n # 执行具体的业务函数\n self.LBY_BeforeTradingDayBegin(trading_date)\n # self.During_Tradingday(trading_date)\n self.LBY_RebalancePosition(trading_date)\n self.LBY_ClearingAfterTradingDayEnd(trading_date)\n self.LBY_AfterTradingDayEnd(trading_date)\n # print(f\" long: {self.order_info_long}\\n short:{self.order_info_short}\\n----------\\n\")\n # break\n stat_res,pnl = self.Trading_Analysis()\n return stat_res,pnl\n\n# ============================ Demo ============================\n\ndef run_single_setting(type_list):\n # print(datetime.datetime.now())\n StartCash = 1000000000 # 初始账户资金\n RebalanceTimeList = ['09:01'] # 时间戳为列表里时间的K线完成时触发调仓逻辑,每个时间格式hh:mm,比如'09:15'\n BeginDate = \"2010-01-01\"\n EndDate = \"2020-11-24\"\n backtester = LBY_BackTester_QH(StartCash, RebalanceTimeList, BeginDate, EndDate)\n backtester.parameter_symbol_type_list = type_list\n backtester.LBY_DoBackTest()\n\n\nif __name__ == '__main__':\n total_list = ['TA','RM','OI','PP','L','CU','ZN','NI','AL','FG','CF','RU','V','BU','ZC','MA','HC','RB','I','J','JM','Y','P','M','A','EG']\n run_single_setting(total_list)\n\n\n\n\n","repo_name":"zhenghaobaby/cta_factors_test","sub_path":"FBT.py","file_name":"FBT.py","file_ext":"py","file_size_in_byte":51550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74342294985","text":"from typing import no_type_check\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef plot():\n ex1a = np.array( [3.072769, 2.872562, 2.962380 ] )\n ex1b = np.array( [3.005225, 3.004530, 2.959063 ] )\n ex1c = np.array( [3.111986, 3.024494, 3.002199 ] )\n ex1d = np.array( [0.689733, 0.690785, 0.699717 ] )\n\n ex1a_mean = np.mean(ex1a)\n ex1b_mean = np.mean(ex1b)\n ex1c_mean = np.mean(ex1c)\n ex1d_mean = np.mean(ex1d)\n \n\n ex1a_std = np.std(ex1a)\n ex1b_std = np.std(ex1b)\n ex1c_std = np.std(ex1c)\n ex1d_std = np.std(ex1d)\n print([ex1a_std,ex1b_std,ex1c_std,ex1d_std])\n\n labels = [f\"Busca sequencial simples\\nMédia:{ex1a_mean: .4g}\\nDesvio Padrão:{ex1a_std: .4g}\",\n f\"Busca realocação\\nMédia:{ex1b_mean: .4g}\\nDesvio Padrão:{ex1b_std: .4g}\", \n f\"Busca transposição\\nMédia:{ex1c_mean: .4g}\\nDesvio Padrão:{ex1c_std: .4g}\", \n f\"Busca por índice primário\\nMédia:{ex1d_mean: .4g}\\nDesvio Padrão:{ex1d_std: .4g}\"]\n \n ctes = [ex1a_mean,ex1b_mean,ex1c_mean,ex1d_mean]\n standardDeviation = [ex1a_std,ex1b_std,ex1c_std,ex1d_std]\n\n xPos = np.arange(4)\n fig, ax = 
plt.subplots()\n ax.bar(xPos, ctes, yerr=standardDeviation, align='center', alpha=0.5, capsize=10, ecolor='black')\n ax.set_ylabel('Tempo médio percorrido')\n ax.set_xticks(xPos)\n ax.set_xticklabels(labels)\n ax.set_title(\"Grafico do tempo com desvio padrão\")\n ax.yaxis.grid(True)\n\n plt.tight_layout()\n plt.show()\n\nplot()\n\n \n\n","repo_name":"Franreno/TrabalhosFacul","sub_path":"ED2/T2/figures/ex1_plotter.py","file_name":"ex1_plotter.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"37903834626","text":"from flask_restplus import fields\nfrom api.apirestplus import api\nfrom database.db import db\n\n\ncliente_restplus = api.model('Cliente', {\n 'cnpj_cpf': fields.String(required=True, description='CNPJ ou CPF do cliente', example='00000000000000'),\n 'nome': fields.String(required=True, description='Nome do cliente - Fantasia + Razão Social'),\n 'email_responsavel': fields.String(required=True, description='E-mail do responsável da empresa')\n})\n\ncliente_liberacao = api.model('Cliente_liberacao', {\n 'liberado': fields.Boolean(required=True, description='Cliente liberado para instalação e atualizações')\n})\n\nclientes_validar_senha = api.model('Cliente_validar_senha', {\n 'senha': fields.String(required=True)\n})\n\n\nclass ClienteModel(db.Model):\n __tablename__ = 'CLIENTES'\n cnpj_cpf = db.Column('cnpjcpf', db.String(14), primary_key=True)\n nome = db.Column(db.String(120), nullable=False)\n autorizado = db.Column(db.Boolean, nullable=False)\n email_responsavel = db.Column('emailresp', db.String(120))\n\n def __init__(self):\n self.cnpj_cpf = ''\n self.nome = ''\n self.autorizado = False\n self.email_responsavel = ''\n","repo_name":"glacsius/jelastic","sub_path":"api/modelos/clientes.py","file_name":"clientes.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33342667768","text":"from deeplake.util.exceptions import ReadOnlyModeError, EmptyTensorError, TransformError\nfrom deeplake.client.utils import get_user_name\nfrom deeplake.cli.auth import logout, login\nfrom click.testing import CliRunner\n\nimport numpy as np\n\nimport posixpath\nimport deeplake\nimport pytest\n\n\ndef populate(ds):\n ds.create_tensor(\"images\", htype=\"image\", sample_compression=\"jpg\")\n ds.create_tensor(\"labels\", htype=\"class_label\")\n\n ds.extend(\n {\n \"images\": np.random.randint(0, 256, (100, 20, 20, 3), dtype=np.uint8),\n \"labels\": np.random.randint(0, 3, (100,)),\n }\n )\n ds.commit()\n\n\n@pytest.mark.slow\ndef test_view_token_only(\n hub_cloud_path, hub_cloud_dev_token, hub_cloud_dev_credentials\n):\n runner = CliRunner()\n result = runner.invoke(logout)\n assert result.exit_code == 0\n\n ds = deeplake.empty(hub_cloud_path, token=hub_cloud_dev_token)\n with ds:\n populate(ds)\n\n ds = deeplake.load(hub_cloud_path, token=hub_cloud_dev_token)\n view = ds[50:100]\n view.save_view(id=\"50to100\")\n\n ds = deeplake.load(hub_cloud_path, read_only=True, token=hub_cloud_dev_token)\n view = ds[25:100]\n view.save_view(id=\"25to100\")\n\n ds = deeplake.load(hub_cloud_path, read_only=True, token=hub_cloud_dev_token)\n\n loaded = ds.load_view(\"50to100\")\n np.testing.assert_array_equal(loaded.images.numpy(), ds[50:100].images.numpy())\n np.testing.assert_array_equal(loaded.labels.numpy(), ds[50:100].labels.numpy())\n assert loaded._vds.path == posixpath.join(hub_cloud_path, 
\".queries/50to100\")\n\n loaded = ds.load_view(\"25to100\")\n np.testing.assert_array_equal(loaded.images.numpy(), ds[25:100].images.numpy())\n np.testing.assert_array_equal(loaded.labels.numpy(), ds[25:100].labels.numpy())\n assert loaded._vds.path == posixpath.join(hub_cloud_path, \".queries/25to100\")\n\n ds.delete_view(\"25to100\")\n deeplake.delete(hub_cloud_path, token=hub_cloud_dev_token)\n\n\n@pytest.mark.slow\ndef test_view_public(hub_cloud_dev_credentials):\n runner = CliRunner()\n result = runner.invoke(logout)\n assert result.exit_code == 0\n\n username, password = hub_cloud_dev_credentials\n\n ds = deeplake.load(\"hub://activeloop/mnist-train\")\n view = ds[100:200]\n\n with pytest.raises(ReadOnlyModeError):\n view.save_view(id=\"100to200\")\n\n runner.invoke(login, f\"-u {username} -p {password}\")\n\n ds = deeplake.load(\"hub://activeloop/mnist-train\")\n view = ds[100:200]\n\n with pytest.raises(ReadOnlyModeError):\n view.save_view(id=\"100to200\")\n\n runner.invoke(logout)\n\n\ndef test_view_with_empty_tensor(local_ds):\n with local_ds as ds:\n ds.create_tensor(\"images\")\n ds.images.extend([1, 2, 3, 4, 5])\n\n ds.create_tensor(\"labels\")\n ds.labels.extend([None, None, None, None, None])\n ds.commit()\n\n ds[:3].save_view(id=\"save1\", optimize=True)\n\n view = ds.load_view(\"save1\")\n\n assert len(view) == 3\n\n with pytest.raises(EmptyTensorError):\n view.labels.numpy()\n\n np.testing.assert_array_equal(\n view.images.numpy(), np.array([1, 2, 3]).reshape(3, 1)\n )\n\n\n@pytest.mark.slow\ndef test_vds_read_only(hub_cloud_path, hub_cloud_dev_token):\n ds = deeplake.empty(hub_cloud_path, token=hub_cloud_dev_token)\n with ds:\n ds.create_tensor(\"abc\")\n ds.abc.extend([1, 2, 3, 4, 5])\n ds.commit()\n\n ds[:3].save_view(id=\"first_3\")\n\n ds = deeplake.load(hub_cloud_path, read_only=True, token=hub_cloud_dev_token)\n\n view = ds.load_view(\"first_3\")\n\n assert view.base_storage.read_only == True\n assert view._vds.base_storage.read_only == True\n\n\ndef test_view_from_different_commit(local_ds):\n with local_ds as ds:\n ds.create_tensor(\"x\")\n ds.x.extend(list(range(10)))\n cid = ds.commit()\n view = ds[4:9]\n view.save_view(id=\"abcd\")\n ds.x.extend(list(range(10, 20)))\n cid2 = ds.commit()\n view2 = ds.load_view(\"abcd\")\n assert view2.commit_id == cid\n assert ds.commit_id == cid2\n assert not view2.is_optimized\n view2.save_view(id=\"efg\", optimize=True)\n view3 = ds.load_view(\"efg\")\n assert ds.commit_id == cid2\n assert view3.is_optimized\n\n\ndef test_save_view_ignore_errors(local_ds):\n with local_ds as ds:\n ds.create_tensor(\n \"images\", htype=\"link[image]\", sample_compression=\"jpg\", verify=False\n )\n ds.create_tensor(\"labels\", htype=\"class_label\")\n\n ds.images.extend(\n [deeplake.link(\"https://picsum.photos/20/30\") for _ in range(8)]\n )\n ds.images.extend([deeplake.link(\"https://abcd/20\") for _ in range(2)])\n ds.images.extend(\n [deeplake.link(\"https://picsum.photos/20/30\") for _ in range(10)]\n )\n\n ds.labels.extend([0 for _ in range(20)])\n\n ds.commit()\n\n with pytest.raises(TransformError):\n ds[:10].save_view(id=\"one\", optimize=True, num_workers=2)\n\n ds[:10].save_view(id=\"two\", optimize=True, ignore_errors=True, num_workers=2)\n view = ds.load_view(\"two\")\n\n assert len(view) == 8\n\n assert view.images.htype == \"image\"\n assert view.images.shape == (8, 30, 20, 3)\n\n np.testing.assert_array_equal(view.labels.numpy(), np.array([[0]] * 8))\n\n\n@pytest.mark.parametrize(\"optimize_first_view\", [True, 
False])\n@pytest.mark.parametrize(\"optimize_second_view\", [True, False])\ndef test_save_view_of_view(\n local_ds_generator, optimize_first_view, optimize_second_view\n):\n with local_ds_generator() as ds:\n ds.create_tensor(\"abc\")\n ds.abc.extend(list(range(100)))\n\n ds.commit()\n\n ds[:20].save_view(id=\"first_20\", optimize=optimize_first_view)\n\n view = ds.load_view(\"first_20\")\n\n view[:10].save_view(id=\"first_10\", optimize=optimize_second_view)\n view[10:].save_view(id=\"second_10\", optimize=optimize_second_view)\n\n first_10, second_10 = ds.load_view(\"first_10\"), ds.load_view(\"second_10\")\n np.testing.assert_array_equal(first_10.abc.numpy(), ds[:10].abc.numpy())\n np.testing.assert_array_equal(second_10.abc.numpy(), ds[10:20].abc.numpy())\n\n with local_ds_generator() as ds:\n first_10, second_10 = ds.load_view(\"first_10\"), ds.load_view(\"second_10\")\n np.testing.assert_array_equal(first_10.abc.numpy(), ds[:10].abc.numpy())\n np.testing.assert_array_equal(second_10.abc.numpy(), ds[10:20].abc.numpy())\n\n\n@pytest.mark.parametrize(\"optimize_first_view\", [True, False])\n@pytest.mark.parametrize(\"optimize_second_view\", [True, False])\n@pytest.mark.parametrize(\"optimize_third_view\", [True, False])\ndef test_save_view_of_view_of_view(\n local_ds_generator, optimize_first_view, optimize_second_view, optimize_third_view\n):\n with local_ds_generator() as ds:\n ds.create_tensor(\"abc\")\n ds.abc.extend(list(range(100)))\n\n ds.commit()\n\n ds[:20].save_view(id=\"first_20\", optimize=optimize_first_view)\n\n view = ds.load_view(\"first_20\")\n\n view[:10].save_view(id=\"first_10\", optimize=optimize_second_view)\n\n first_10 = ds.load_view(\"first_10\")\n\n first_10[:5].save_view(id=\"first_5\", optimize=optimize_third_view)\n\n first_5 = ds.load_view(\"first_5\")\n np.testing.assert_array_equal(first_5.abc.numpy(), ds[:5].abc.numpy())\n\n with local_ds_generator() as ds:\n first_5 = ds.load_view(\"first_5\")\n np.testing.assert_array_equal(first_5.abc.numpy(), ds[:5].abc.numpy())\n","repo_name":"activeloopai/deeplake","sub_path":"deeplake/api/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":7449,"program_lang":"python","lang":"en","doc_type":"code","stars":7141,"dataset":"github-code","pt":"81"} +{"seq_id":"28960149353","text":"import numpy as np\nimport torch.nn as nn\n\n\nclass Purificator(nn.Module):\n def __init__(self, qbits_num, out_ch_per_rho, filters_ratio, kernel_size, hidden_convs, output_dim = 2, dilation = 1, ratio_type = None, padding = 0):\n super(Purificator, self).__init__()\n\n self.input_channels = 2\n self.dim = 2**qbits_num\n self.kernel_size = kernel_size\n self.qbits_num = qbits_num\n self.ocpr = out_ch_per_rho\n self.output_dim = output_dim\n\n in_ch = self.input_channels\n fr = filters_ratio\n self.out_dim = self.dim\n\n convs = []\n i = 0\n while (self.out_dim + 2*padding - dilation*(self.kernel_size - 1)) >= self.output_dim and i < hidden_convs:\n out_ch = int(in_ch*fr)\n convs.append(nn.Conv2d(in_ch, out_ch, self.kernel_size, dilation = dilation, padding= padding).double())\n convs.append(nn.ReLU())\n in_ch = out_ch\n\n if ratio_type == 'sqrt':\n fr = np.sqrt(fr)\n elif ratio_type == 'sq':\n fr = fr ** 2\n\n self.out_dim += 2*padding - dilation*(self.kernel_size - 1)\n i += 1\n\n self.out_ch = out_ch\n self.convs = nn.Sequential(*convs)\n\n ks = int((self.out_dim + 2*padding - self.output_dim) / dilation) + 1\n self.out_dim += 2*padding - dilation*(ks - 1)\n assert self.out_dim == self.output_dim, \"Wrong 
output dimension\"\n\n self.output_conv = nn.Conv2d(in_ch, 2*self.ocpr, ks, dilation = dilation, padding = padding)\n\n\n def forward(self, x):\n x = self.convs(x)\n output = self.output_conv(x)\n\n return output","repo_name":"Maticraft/quantum_correlations","sub_path":"commons/models/purificator.py","file_name":"purificator.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12022989461","text":"import logging\nimport time\n\n\n# By default only warning messages or higher are logged unless explicitly set in level.\n\n# Create format String for formatting messages\nLOG_FORMAT = '%(levelname)s %(asctime)s %(message)s'\nlogging.basicConfig(filename='log_file', level=logging.DEBUG,\n format=LOG_FORMAT, filemode='w')\n\n\n# LOG_FORMAT = '%(levelname)s %(asctime)s %(message)s'\n# logging.basicConfig(filename='log_file', level=logging.DEBUG,\n# format=LOG_FORMAT, filemode='w')\n\nlogger = logging.getLogger()\n\nlogger.debug('DEBUG MESSAGE')\nlogger.info('INFO message')\nlogger.warning('WARNING message')\nlogger.error('ERROR message')\nlogger.critical('CRITICAL message')\nprint(logger.level)\n","repo_name":"Johnny-Martinez/EBUpdate","sub_path":"logging_asdf_rename.py","file_name":"logging_asdf_rename.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35934445274","text":"# -*- coding: utf-8 -*-\n\"\"\"Dataframe Cleanup Filter.\n\nThis script cleans up the Dataframe by converting all values to its appropriate type\nto prepare for calculations and analysis.\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndef df_clean_up(df):\n \n #Convert dates to datetime format\n df['Date'] = pd.to_datetime(df['Date'])\n df['Expiration Date'] = pd.to_datetime(df['Expiration Date']) \n\n #Convert values into integer values for calculation\n df['Value'] = pd.to_numeric(df['Value'].str.replace(\",\", \"\"))\n df['Average Price'] = pd.to_numeric(df['Average Price'].str.replace(\",\", \"\"))\n df['Commissions'] = pd.to_numeric(df['Commissions'].str.replace(\"--\", \"\"))\n df['Expiration Date'] = pd.to_datetime(df['Expiration Date'])\n\n return df","repo_name":"JonahLeggett/Derivatives_Portfolio_Analyzer","sub_path":"portfolio_analyzer_app/qualifier/filters/clean_up_df_filter.py","file_name":"clean_up_df_filter.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6232689137","text":"#######\n# First Milestone Project: Develop a Stock Ticker\n# dashboard that either allows the user to enter\n# a ticker symbol into an input box, or to select\n# item(s) from a dropdown list, and uses pandas_datareader\n# to look up and display stock data on a graph.\n######\n\n# ADD PANDAS DATAREADER AND DATETIME TO THE OUTPUT FIGURE\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport pandas_datareader.data as web # requires v0.6.0 or later\nfrom datetime import datetime\n\napp = dash.Dash()\n\napp.layout = html.Div([\n html.H1('Stock Ticker Dashboard'),\n html.H3('Enter a stock symbol:'),\n dcc.Input(\n id='my_ticker_symbol',\n value='TSLA' # sets a default value\n ),\n dcc.Graph(\n id='my_graph',\n figure={\n 'data': [\n {'x': [1,2], 'y': [3,1]}\n ]\n }\n )\n])\n@app.callback(\n 
Output('my_graph', 'figure'),\n [Input('my_ticker_symbol', 'value')])\ndef update_graph(stock_ticker):\n start = datetime(2017, 1, 1)\n end = datetime(2017, 12, 31)\n df = web.DataReader(stock_ticker,'iex',start,end)\n fig = {\n 'data': [\n {'x': df.index, 'y': df.close}\n ],\n 'layout': {'title':stock_ticker}\n }\n return fig\n\nif __name__ == '__main__':\n app.run_server()\n","repo_name":"Pierian-Data/Plotly-Dashboards-with-Dash","sub_path":"2-17-CodeAlongMilestoneProject/StockTicker3.py","file_name":"StockTicker3.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":822,"dataset":"github-code","pt":"81"} +{"seq_id":"24381414273","text":"\n\nimport requests\nimport hashlib\nfrom rcspider.common import REQ,get_log,ABY,CQ_RC\nfrom lxml.etree import HTML\n\n\nclass CQ(REQ):\n\n def __init__(self):\n \"\"\"重庆\"\"\"\n super(REQ).__init__()\n self.s = requests.session()\n self.s.headers.update({\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\",\n \"Cookie\": \"UM_distinctid=1732868fb4344b-009cfeb9d14544-3a65420e-1fa400-1732868fb443ea; Hm_lvt_03b8714a30a2e110b8a13db120eb6774=1594110837,1594366937,1594602475; regionId=510000; token=3f200f0b24c74429b118d0621bbb28d3; acw_tc=2760825e15946082441805991e19acfd3194ae4680a04bfe334f8ce2aa98eb; CNZZDATA1275173796=34275469-1594106682-%7C1594607048; Hm_lpvt_03b8714a30a2e110b8a13db120eb6774=1594609014\"\n })\n\n self.proxy,self.ipItem = ABY()\n self.IPcount = 0\n self.log = get_log()\n self.baseUrl = 'http://183.66.171.75:88/CQCollect/Ry_Query/zcjzs/Wright_List.aspx'\n self.page = 3\n # self.nextpage = 2\n self.pagecount = 1695\n\n\n def xpathpages(self,resp):\n \"\"\"规则解析\"\"\"\n\n etre = HTML(resp)\n\n trlt = etre.xpath('//tr[@bgcolor=\"#00CCFF\"]/following-sibling::tr')\n\n try:\n for _ in trlt:\n item = {}\n item[\"name\"] = \"\".join(_.xpath('.//td[1]//text()')).strip(\" \")\n item[\"sex\"] = \"\".join(_.xpath('.//td[2]//text()')).strip(\" \")\n item[\"qualifications\"] = \"\".join(_.xpath('.//td[3]//text()')).strip(\" \")\n item[\"leval\"] = \"\".join(_.xpath('.//td[4]//text()')).strip(\" \")\n item[\"company\"] = \"\".join(_.xpath('.//td[6]//text()')).strip(\" \")\n item[\"area\"] = \"\".join(_.xpath('.//td[7]//text()')).strip(\" \")\n item[\"_id\"] = hashlib.md5((item[\"name\"] + item[\"area\"] + item[\"leval\"] ).encode('utf-8')).hexdigest()\n CQ_RC.save(item)\n self.log.info(f\"数据{item['_id']}存入mongo\")\n except Exception as e:\n print(e)\n def spider(self):\n while 1:\n res = self.get_req(self.baseUrl)\n if res:\n ETRE = HTML(res.text)\n __VIEWSTATE = \"\".join(ETRE.xpath('//input[@name=\"__VIEWSTATE\"]/@value'))\n data = {\n \"__EVENTTARGET\":\"\",\n \"__EVENTARGUMENT\": \"\",\n \"__VIEWSTATE\":__VIEWSTATE,\n \"FName\":\"\",\n \"FBaseinfoName\":\"\",\n \"FManageDeptID\": \"-1\",\n \"FLevel\": \"0\",\n \"FQualiNumber\":\"\",\n \"FNumber\": \"\",\n \"FIsWright\": \"-1\",\n \"Pager1:NewPage\": str(self.page),\n \"Pager1:BT_Go.x\": \"8\",\n \"Pager1:BT_Go.y\": \"9\",\n }\n\n while self.page <= self.pagecount:\n\n resp = self.post_req(self.baseUrl,data=data)\n if resp:\n resp = resp.text.replace(\"\\r\", \"\").replace(\"\\t\", \"\").replace(\"\\n\", \"\")\n ETRE = HTML(resp)\n self.xpathpages(resp)\n self.log.info(f\"第{str(self.page)}页数据抓取完成。。。。。。。。\")\n\n __VIEWSTATE = \"\".join(ETRE.xpath('//input[@name=\"__VIEWSTATE\"]/@value'))\n data[\"__VIEWSTATE\"] = __VIEWSTATE\n self.page += 1\n data[\"Pager1:NewPage\"] = 
str(self.page)\n else:\n print(\"IP无效\")\n\n break\n\n\nst = CQ()\n\nst.spider()","repo_name":"yangwen1997/code","sub_path":"spider_process/rcspider/cq.py","file_name":"cq.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"13611749193","text":"\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@File : datafomat_convert.py\n@Author: Lijing\n@Date : 2021/8/6 16:57\n@Desc : convert 3D nifti data format to 2d rgb image\n\n'''\n'''\nfunction:将.nii.gz格式的三维mask数据转换为二维图像和带标注的检测框的.xml格式(voc数据集格式)\n'''\n\nimport os\nimport xml.etree.ElementTree as ET\nfrom PIL import Image\nimport numpy as np\nimport SimpleITK as sitk\nfrom tqdm import tqdm\nimport cv2\nfrom skimage import measure\nimport copy\n\n\ndef indent(elem, level=0):\n '''\n add enter to xml file\n '''\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n\ndef write_xml(imgname, filepath, labeldicts):\n root = ET.Element('Annotation')\n ET.SubElement(root, 'filename').text = str(imgname)\n sizes = ET.SubElement(root, 'size')\n\n for labeldict in labeldicts:\n ET.SubElement(sizes, 'width').text = str(int(labeldict['weight']))\n ET.SubElement(sizes, 'height').text = str(int(labeldict['height']))\n ET.SubElement(sizes, 'depth').text = '3'\n objects = ET.SubElement(root, 'object') # 创建object子节点\n ET.SubElement(objects, 'name').text = labeldict['name'] # BDD100K_10.names文件中 # 的类别名\n ET.SubElement(objects, 'pose').text = 'Unspecified'\n ET.SubElement(objects, 'truncated').text = '0'\n ET.SubElement(objects, 'difficult').text = '0'\n bndbox = ET.SubElement(objects, 'bndbox')\n ET.SubElement(bndbox, 'xmin').text = str(int(labeldict['xmin']))\n ET.SubElement(bndbox, 'ymin').text = str(int(labeldict['ymin']))\n ET.SubElement(bndbox, 'xmax').text = str(int(labeldict['xmax']))\n ET.SubElement(bndbox, 'ymax').text = str(int(labeldict['ymax']))\n indent(root,0)\n tree = ET.ElementTree(root)\n tree.write(filepath, encoding='utf-8')\n\n\n# param: bounding box,image_array,img_name\ndef save_xml(img_name, img_array, bbox,label_name_list):\n labeldicts = []\n for index,bounding_box in enumerate(bbox):\n sh, sw = img_array.shape[0], img_array.shape[1]\n new_dict = {'name': label_name_list[index],\n 'difficult': '0',\n 'height': sh,\n 'weight': sw,\n 'xmin': str(bounding_box[1]-1),\n 'ymin': str(bounding_box[0]-1),\n 'xmax': str(bounding_box[3]+1),\n 'ymax': str(bounding_box[2]+1),\n\n }\n labeldicts.append(new_dict)\n write_xml(img_name, img_name, labeldicts)\n\n\ndef generator_channel(image_slice,lower,upper):\n image_slice_copy = copy.deepcopy(image_slice)\n image_slice_copy[image_slice_copy > upper] = upper\n image_slice_copy[image_slice_copy< lower] = lower\n img_normalize = (((image_slice_copy - lower) / (upper - lower)) * 255).astype(np.uint8)\n return img_normalize\n\ndef main():\n image_data = './val'\n image_label = './val'\n save_dir = './example/val_pngimage'\n save_xml_dir = './example/val_annotation'\n\n label_list = [\n \"background\",\n \"nodule\",\n \"patches\",\n \"strip\",\n \"grid\",\n \"spherical\",\n \"empty\",\n \"cavity\",\n \"pleuralEffusion\"]\n\n\n label_count = {\"background\":0,\n \"patches\":0,\n \"strip\":0,\n 
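                   # note: "nodule" (label 1) is deliberately absent here, since main()
                   # zeroes label 1 in label_array before counting, so it is never tallied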
\"grid\":0,\n \"spherical\":0,\n \"empty\":0,\n \"cavity\":0,\n \"pleuralEffusion\":0}\n _COLORS = np.array(\n [\n 0.000, 0.447, 0.741,\n 0.850, 0.325, 0.098,\n 0.929, 0.694, 0.125,\n 0.494, 0.184, 0.556,\n 0.466, 0.674, 0.188,\n 0.301, 0.745, 0.933,\n 0.635, 0.078, 0.184,\n 0.300, 0.300, 0.300,\n 0.600, 0.600, 0.600,\n 1.000, 0.000, 0.000,\n 1.000, 0.500, 0.000,\n 0.749, 0.749, 0.000,\n ]\n ).astype(np.float32).reshape(-1, 3)\n\n\n\n for file in tqdm(os.listdir(image_data)):\n label_dict = []\n if file.startswith('img-'):\n image = sitk.ReadImage(os.path.join(image_data,file))\n image_array = sitk.GetArrayFromImage(image)\n label = sitk.ReadImage(os.path.join(image_label,file.replace('img-','label-')))\n label_array = sitk.GetArrayFromImage(label)\n label_array[label_array == 1] = 0\n # if np.max(label_array) ==1:\n # print(file)\n\n for index in range(image_array.shape[0]):\n if np.max(label_array[index]) > 0:\n bounding_box = []\n label_name_list = []\n\n image_slice = image_array[index]\n img_channel2 = generator_channel(image_slice, -1000, 400) # 肺窗\n img_channel3 = generator_channel(image_slice, -160, 240) # 高衰减\n img_channel1 = generator_channel(image_slice, -1400, -600) # 低衰减\n img_normalize = np.dstack((img_channel1,img_channel2,img_channel3))\n if (img_channel1 == img_channel3).all():\n print('true')\n label_slice = label_array[index]\n label = measure.label(label_slice,connectivity=2)\n region = measure.regionprops(label)\n\n for i in range(len(region)):\n bounding = region[i].bbox\n bounding_box.append(bounding)\n center = region[i].coords[0]\n label_name_list.append(label_list[label_array[index,int(center[0]),int(center[1])]])\n label_count[label_list[label_array[index,int(center[0]),int(center[1])]]] = \\\n label_count[label_list[label_array[index,int(center[0]),int(center[1])]]] + 1\n # color = (_COLORS[label_array[index,int(center[0]),int(center[1])]] * 255).astype(np.uint8).tolist()\n # cv2.rectangle(img_normalize,(bounding[1],bounding[0]),(bounding[3],bounding[2]),color =color,thickness=2)\n cv2.imwrite(os.path.join(save_dir,file.split('.')[0]+str('-')+str(index)+'.png'),img_normalize)\n\n # cv2.imshow('xx',img_normalize)\n # cv2.waitKey(0)\n save_xml(os.path.join(save_xml_dir,file.split('.')[0]+str('-')+str(index)+'.xml'),img_normalize,bounding_box,label_name_list)\n\n\nif __name__ =='__main__':\n main()","repo_name":"aichijing/prepare_data","sub_path":"data_format_convert.py","file_name":"data_format_convert.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14325139883","text":"from threading import Timer\nfrom time import sleep\n\ndef myInterrupt():\n global sampleRFID\n print('interrupt')\n sampleRFID= True\n timer= Timer(1.0, myInterrupt)\n timer.start()\n\nsampleRFID= True\nwhile(1):\n if(sampleRFID):\n #sample\n print('sample')\n sampleRFID= False\n# timer= Timer(1.0, myInterrupt)\n# timer.start()\n print('hello world')\n sleep(0.3)\n\n","repo_name":"ekahn27/MyWork","sub_path":"Python/testFor5725.py","file_name":"testFor5725.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11290378635","text":"import sys\nimport pygame\nfrom pygame.locals import *\nimport time\nimport brain as logic\n\n# CONSTANTS\nBLACK = (0, 0, 0)\nGREEN = pygame.Color(0,255,0)\nGREY = pygame.Color(128, 128, 128)\nWHITE = pygame.Color(255, 255, 255) # White\nBLUE = pygame.Color(0, 0, 255) 
\nRED = pygame.Color(255, 0, 0) \nXDIM = 400\nYDIM = 400\n\n# Initialize game\npygame.init()\npygame.display.set_caption(\"Minesweeper\")\nDISPLAYSURF = pygame.display.set_mode((XDIM, YDIM),pygame.SCALED) \nFramePerSec = pygame.time.Clock()\nFPS = 50\nDISPLAYSURF.fill(GREY)\nMINE_IMG = pygame.image.load(\"resources/mine.png\")\nfont = pygame.font.SysFont(\"Verdana\", 20)\nprevhitlist = set()\nmines = []\nWinner = -1\n\ndef drawboardlines(DISPLAYSURF, XDIM, YDIM, Color):\n i = 0\n while i <= XDIM:\n pygame.draw.line(DISPLAYSURF, Color, (0,i), (XDIM, i)) # For horizontal line\n pygame.draw.line(DISPLAYSURF, Color, (i,0), (i, YDIM)) # For vertical line\n i += 40\n\n\ndef changecellcolor(cell, color): #Single cell change color to 'color'\n x,y = cell\n i = 1\n while i < 40:\n pygame.draw.line(DISPLAYSURF, color, ((x*40)+1,(y*40)+i),((x*40)+39,(y*40)+i))\n i += 1\n\n\ndef clearcells(cell): #Clear cells around central\n x,y=cell\n for ax,ay in ((x+1,y),(x-1,y),(x,y+1),(x,y-1),(x-1,y-1),(x+1,y+1),(x-1,y+1),(x+1,y-1)):\n if ax>=0 and ax <=9 and ay>=0 and ay <=9:\n if (ax,ay) in prevhitlist:\n continue\n else:\n if (ax,ay) in mines:\n changecellcolor((ax,ay),BLUE)\n else:\n changecellcolor((ax,ay), WHITE)\n value = font.render(str(matrix[ax][ay]), True, BLACK)\n DISPLAYSURF.blit(value, ((ax*40)+11,(ay*40)+11))\n prevhitlist.add((ax,ay))\n\nif __name__ == \"__main__\":\n\n drawboardlines(DISPLAYSURF, XDIM, YDIM, BLACK)\n mines = []\n matrix = [[0 for x in range(10)] for y in range(10)]\n debug = 0\n mines = logic.placement(debug,matrix,mines)\n matrix = logic.numbering(matrix,mines)\n curscore = 0\n mouse_pressed = False\n press_time = time.time()\n # Game loop\n while True:\n scores = font.render(str(curscore), True, BLACK)\n #DISPLAYSURF.blit(scores, (10,10))\n for event in pygame.event.get(): #pygame.event.get gets the event done by the user (mouse scroll, click, type etc)\n if event.type == QUIT: #If we attempt to guit the game, the game quits.\n pygame.quit() #Closes the pygame window\n sys.exit() # Closes the python script\n\n elif event.type == MOUSEBUTTONDOWN:\n xcoord,ycoord = pygame.mouse.get_pos()\n x = xcoord//40\n y = ycoord//40\n cell = (x,y)\n if len(prevhitlist) == 0 and cell in mines: # So that person doesn't die on first chance\n mines = logic.placement(debug,matrix,mines)\n matrix = logic.numbering(matrix,mines)\n\n if event.button == 3:\n changecellcolor(cell,BLUE)\n prevhitlist.add(cell)\n else:\n if (cell in prevhitlist) and cell not in mines:\n pygame.display.set_caption(\"HIT SOMEWHERE ELSE\")\n else:\n prevhitlist.add(cell)\n if cell in mines:\n changecellcolor(cell,RED) # Hit mine\n Winner = 0\n #showscore(prevhitlist)\n else:\n changecellcolor(cell,WHITE) # Missed mine\n value = font.render(str(matrix[x][y]), True, BLACK)\n DISPLAYSURF.blit(value, ((x*40)+11,(y*40)+10))\n clearcells(cell)\n if pygame.display.get_caption()[0] == \"HIT SOMEWHERE ELSE\":\n pygame.display.set_caption(\"Minesweeper\") \n if set(mines).issubset(prevhitlist):\n Winner = 1\n\n if Winner == 1:\n pygame.display.set_caption(\"WINNER\")\n drawboardlines(DISPLAYSURF, XDIM, YDIM, GREEN)\n pygame.display.update()\n time.sleep(5)\n pygame.quit() #Closes the pygame window\n sys.exit() # Closes the python script\n \n elif Winner == 0:\n pygame.display.set_caption(\"LOSER\")\n drawboardlines(DISPLAYSURF, XDIM, YDIM, RED)\n pygame.display.update()\n time.sleep(5)\n pygame.quit() #Closes the pygame window\n sys.exit() # Closes the python script\n\n pygame.display.update()\n FramePerSec.tick(FPS) #Setting the 
frame rate of 60fps","repo_name":"sohanpatil1/Minesweeper-Pygame","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2492066513","text":"import sys\nspam = 0\nwhile spam < 5:\n print('Hello World')\n spam = spam + 1\n\n'''---------------------'''\n\nprint('''\n \n ''')\nprint(' ')\nprint(' ')\n'''--------------------'''\n\nname = ''\nwhile name != 'your name':\n \n name =input('Real name please? ')\n \nprint('Thank you!!')\n\nsys.exit","repo_name":"radmarion/Automate-the-Boring-Stuff","sub_path":"while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17191555460","text":"\n\ndef order():\n drink_wish = input(\"What would you like? espresso/latte/cappuccino? \")\n while drink_wish == \"report\":\n print(f\"Water: {resources['water']}ml\\n\"\n f\"Milk: {resources['milk']}ml\\n\"\n f\"Coffee: {resources['coffee']}g\\n\"\n f\"Money: ${resources['money']}\\n\")\n drink_wish = input(\"What would you like? espresso/latte/cappuccino? \")\n if drink_wish == \"off\":\n return False\n else:\n return drink_wish\n\n\ndef check_resources(drink_ingredients, input_resources):\n missing_ingredient = \"\"\n for ingredient in drink_ingredients:\n if drink_ingredients[f\"{ingredient}\"] > input_resources[f\"{ingredient}\"]:\n missing_ingredient = ingredient\n return missing_ingredient\n\n\ndef check_order(input_order):\n if input_order == \"espresso\" or \"latte\" or \"cappuccino\":\n missing_ingredient = check_resources(DRINK_INFO[\"ingredients\"], resources)\n if missing_ingredient != \"\":\n print(f\"Not enough {missing_ingredient}.\")\n else:\n check_budget(resources)\n\n\ndef insert_coins():\n print(\"Please insert coins\")\n quarters = int(input(f\"How many quarters?: \")) * 0.25\n dimes = int(input(f\"How many dimes?: \")) * 0.10\n nickles = int(input(f\"How many nickles?: \")) * 0.05\n pennies = int(input(f\"How many pennies?: \")) * 0.01\n budget = float(quarters + dimes + nickles + pennies)\n return budget\n\n\ndef check_budget(input_resources):\n budget = insert_coins()\n if budget < DRINK_INFO[\"cost\"]:\n print(\"Sorry, not enough money. Money returned.\")\n else:\n input_resources[\"money\"] += DRINK_INFO[\"cost\"]\n change = round(budget - DRINK_INFO[\"cost\"], 2)\n if change == 0:\n print(\"Right on point, you don't need change!\")\n else:\n print(f\"Here is ${change} in change.\")\n consume_resources(DRINK_INFO[\"ingredients\"], resources)\n\n\ndef consume_resources(drink_ingredients, input_resources):\n for ingredient in drink_ingredients:\n input_resources[ingredient] -= drink_ingredients[ingredient]\n print(f\"Here is your {DRINK_WISH} ☕. 
Enjoy!\")\n\n\nMENU = {\n \"espresso\": {\n \"ingredients\": {\n \"water\": 50,\n \"coffee\": 18,\n },\n \"cost\": 1.5,\n },\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24,\n },\n \"cost\": 2.5,\n },\n \"cappuccino\": {\n \"ingredients\": {\n \"water\": 250,\n \"milk\": 100,\n \"coffee\": 24,\n },\n \"cost\": 3.0,\n }\n}\n\nresources = {\n \"water\": 300,\n \"milk\": 200,\n \"coffee\": 100,\n \"money\": 0,\n}\n\nnew_order = True\nwhile new_order:\n DRINK_WISH = order()\n if not DRINK_WISH:\n new_order = False\n else:\n DRINK_INFO = MENU[DRINK_WISH]\n check_order(DRINK_WISH)\n","repo_name":"omitsch/coffeemachine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22427602710","text":"#!/usr/bin/python3\n\"\"\"NQueens problem\"\"\"\n\n\nimport sys\n\n\ndef print_board(board):\n \"\"\"Print the nqueens board\"\"\"\n chess = []\n for i in board:\n for j in i:\n if j == 1:\n chess.append([board.index(i), i.index(j)])\n print(chess)\n\n\ndef is_safe(board, row, column, value):\n \"\"\"Check if current board is safe\"\"\"\n for i in range(column):\n if board[row][i] + board[row][i + 1] != 0:\n return False\n for i, j in zip(range(row, -1, -1), range(column, -1, -1)):\n if board[i][j] == 1:\n return False\n for i, j in zip(range(row, value, 1), range(column, -1, -1)):\n if board[i][j] == 1:\n return False\n return True\n\n\ndef nqueens(board, column, value):\n \"\"\"nqueens recursive function\"\"\"\n if column >= value:\n print_board(board)\n for i in range(value):\n if is_safe(board, i, column, value):\n board[i][column] = 1\n if nqueens(board, column + 1, value):\n return True\n board[i][column] = 0\n return False\n\n\nif __name__ == \"__main__\":\n \"\"\"Main function for nqueens\"\"\"\n if len(sys.argv) != 2:\n print(\"Usage: nqueens N\")\n exit(1)\n if sys.argv[1].isnumeric():\n n = int(sys.argv[1])\n else:\n print(\"N must be a number\")\n exit(1)\n if n < 4:\n print(\"N must be at least 4\")\n exit(1)\n board = [[0 for i in range(n)] for j in range(n)]\n nqueens(board, 0, n)\n","repo_name":"faspen/holbertonschool-interview","sub_path":"0x0C-nqueens/0-nqueens.py","file_name":"0-nqueens.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23829009410","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 14 21:42:19 2018\n@author: j.lappalainen\nPlots inspired by:\nMolano-Mazon, Manuel, Arno Onken, Eugenio Piasini, and Stefano Panzeri. 
\"Synthesizing realistic neural population\nactivity patterns using Generative Adversarial Networks.\" arXiv preprint arXiv:1803.00338 (2018).\n\"\"\"\n\nimport numpy as np\nfrom utils.plot_props import PlotProps\n\n\nclass Evaluate:\n \"\"\"This class implements several evaluation metrics.\n\n Args:\n groundtruth (array): Groundtruth spike data\n (#repeats, #bins, #neurons)\n time (float): Total duration of the spike train in s.\n Attributes:\n groundtruth (array): Groundtruth spike data\n (#repeats, #bins, #neurons)\n n_repeats (int): # repetitions\n n_bins (int): # bins\n n_neurons (int): # neurons\n time (float): Total duration of the spike train in s.\n \"\"\"\n\n def __init__(self, groundtruth, time=1):\n self.groundtruth = np.array(groundtruth)\n self.n_repeats, self.n_bins, self.n_neurons = self.groundtruth.shape\n self.time = time\n\n def spike_count_average(self, generated):\n \"\"\"Computes the average number of spikes per neuron.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n Returns:\n ndarray: Spike count average of generated data.\n ndarray: Spike count average of groundtruth data.\n \"\"\"\n generated = self._check(generated)\n return generated.mean(axis=(0, 1)) / self.time, \\\n self.groundtruth.mean(axis=(0, 1)) / self.time\n\n def spike_count_std(self, generated):\n \"\"\"Computes the standard deviation of number of spikes per neuron.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n Returns:\n ndarray: Spike count std of generated data.\n ndarray: Spike count std of groundtruth data.\n \"\"\"\n generated = self._check(generated)\n return generated.std(axis=(0, 1)) / self.time, \\\n self.groundtruth.std(axis=(0, 1)) / self.time\n\n def spikes_per_bin(self, generated):\n \"\"\"\"\"\"\n generated = self._check(generated)\n return generated.mean(axis=0), \\\n self.groundtruth.mean(axis=0)\n\n def correlation(self, generated):\n \"\"\"Computes the normalized covariance between pairs of neurons.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n Note: The resulting square matrix contains both, pairwise\n covariance of neurons within and across generated and grountruth\n data. For indexing,the first n_neurons correspond to generated\n neurons and the next n_neurons correspond to groundtruth neurons.\n Returns:\n ndarray: Square matrix of size (2 * n_neurons, 2 * n_neurons).\n \"\"\"\n generated = self._check(generated)\n return np.corrcoef(generated.reshape(-1, self.n_neurons).T,\n self.groundtruth.reshape(-1, self.n_neurons).T)\n\n def signal_correlation(self, generated):\n r\"\"\"\n Computes signal correlation between pairs of neurons\n Reference: Lyamzin, D. R., Macke, J. H., & Lesica, N. A. (2010). 
Modeling population spike trains with\n specified time-varying spike rates, trial-to-trial variability, and pairwise signal and noise\n correlations\n Args:\n generated (ndarray): generated spike data\n (#repeats, #bins, #neurons)\n\n Returns:\n ndarray: Square matrix of size (2 * n_neurons, 2 * n_neurons).\n \"\"\"\n generated = self._check(generated)\n groundtruth_shuffled = np.copy(self.groundtruth)\n\n np.random.shuffle(groundtruth_shuffled)\n\n generated_shuffled = np.copy(generated)\n np.random.shuffle(generated_shuffled)\n gt_corr_shuffled = np.corrcoef(self.groundtruth.reshape(-1, self.n_neurons).T,\n groundtruth_shuffled.reshape(-1, self.n_neurons).T).T\n\n gen_corr_shuffled = np.corrcoef(generated.reshape(-1, self.n_neurons).T,\n generated_shuffled.reshape(-1, self.n_neurons).T).T\n\n return gt_corr_shuffled, gen_corr_shuffled\n\n def noise_correlation(self, generated):\n r\"\"\"\n Computes noise correlation between pairs of neurons\n Args:\n generated (ndarray): generated spike data\n (#repeats, #bins, #neurons)\n\n Returns:\n ndarray: Square matrix of size (2 * n_neurons, 2 * n_neurons).\n \"\"\"\n generated = self._check(generated)\n return self.correlation(generated) - self.signal_correlation(generated)\n\n def lag_correlation(self, generated):\n \"\"\"Lag-covariance between pairs of neurons.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n Note: for each pair of neurons, we shift the activity of one of the\n neurons by one bin and compute the covariance between the resulting\n activities. This quantity thus indicates how strongly the activity\n of one of the neurons is related to the future activity of the\n other neuron.\n Returns:\n ndarray: Square matrix with lag covariance for generated\n neurons (n_neurons, n_neurons)\n ndarray: Square matrix with lag covariance for groundtruth\n neurons (n_neurons, n_neurons)\n\n \"\"\"\n generated = self._check(generated)\n return np.corrcoef(np.roll(generated.reshape(-1, self.n_neurons),\n -1, axis=0),\n generated.reshape(-1, self.n_neurons)), \\\n np.corrcoef(np.roll(self.groundtruth.reshape(-1, self.n_neurons),\n -1, axis=0),\n self.groundtruth.reshape(-1, self.n_neurons))\n\n def average_time_course(self):\n return NotImplemented\n\n def synchrony(self, generated):\n return NotImplemented\n\n def autocorrelogram(self, generated):\n return NotImplemented\n\n def _check(self, generated):\n generated = np.array(generated)\n assert generated.shape == self.groundtruth.shape, \"Size mismatch!\"\n return generated\n\n @staticmethod\n def _scramble(a, axis=-1):\n \"\"\"\n Return an array with the values of `a` independently shuffled along the\n given axis\n Source:https://stackoverflow.com/questions/36272992/numpy-random-shuffle-by-row-independently\n \"\"\"\n b = a.swapaxes(axis, -1)\n n = a.shape[axis]\n idx = np.random.choice(n, n, replace=False)\n b = b[..., idx]\n return b.swapaxes(axis, -1)\n\n\nclass Visualize(Evaluate):\n \"\"\"Visualization class that leverages the evaluation metrics.\n\n Args:\n groundtruth (array): Groundtruth spike data\n (#repeats, #bins, #neurons)\n time (float): Total duration of the spike train in s.\n Attributes:\n plot (object): A plot properties instance, specifying\n some layout choices.\n Please read the Evaluate docstring for more info.\n \"\"\"\n plot = PlotProps()\n\n def __init__(self, groundtruth, time=None):\n super(Visualize, self).__init__(groundtruth, time)\n\n def mean(self, generated, model, ax=None, marker='.', markersize=10):\n \"\"\"Plots the spike-count-average 
for each neuron of\n groundtruth vs. the generated data.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n model (str): The name of the model.\n ax (object, optional): An existing axis object.\n Defaults to None which creates a new axis.\n marker (str): Defaults to '.'.\n Returns:\n object: Axis object.\n \"\"\"\n if not ax:\n ax = self.plot.init_subplot('Mean Firing probabilities')\n model_mean, gt_mean = self.spike_count_average(generated)\n vmax = np.max([model_mean.max(), gt_mean.max()])\n ax.plot([0, vmax + .2], [0, vmax + .2], 'black')\n ax.plot(gt_mean, model_mean, marker, label=model, markersize=markersize)\n # ax.set_xlabel('Real Mean Firing probabilities')\n # ax.set_ylabel('Generated Mean Firing probabilities')\n # ax.legend()\n return ax\n\n def std(self, generated, model, ax=None, marker='.'):\n \"\"\"Plots the spike-count-std for each neuron of\n groundtruth vs. the generated data.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n model (str): The name of the model.\n ax (object, optional): An existing axis object.\n Defaults to None which creates a new axis.\n marker (str): Defaults to '.'.\n Returns:\n object: Axis object.\n \"\"\"\n if not ax:\n ax = self.plot.init_subplot('Std')\n model_std, gt_std = self.spike_count_std(generated)\n vmax = np.max([model_std.max(), gt_std.max()])\n ax.plot([0, vmax + .2], [0, vmax + .2], 'black')\n ax.plot(gt_std, model_std, marker, label=model, markersize=13)\n ax.set_xlabel('Real Std')\n ax.set_ylabel('Generated Std')\n ax.legend()\n return ax\n\n def mean_per_bin(self, generated, model, neurons=None, marker='.', label=None,\n figsize=[5, 5]):\n \"\"\"Plots a grid of the generated mean activity in timebins vs. the expected mean\n activity in timebins.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n model (str): The name of the model.\n neurons (list, optional): List of neurons that are supposed to be plotted.\n Defaults to None, i.e. all neurons are plottet.\n marker (str): Defaults to '.'.\n label (str, optional): If specified, the subplots will be labeled in the top\n right corner with 'label%s'%neuron.\n figsize (list): Size of the figure.\n Returns:\n object: Axis object.\n \"\"\"\n model, gt = self.spikes_per_bin(generated)\n if not neurons:\n neurons = np.arange(0, self.n_neurons, 1)\n gridwidth = int(np.ceil(np.sqrt(len(neurons))))\n gridheight = gridwidth if gridwidth * (gridwidth - 1) < len(neurons) else (gridwidth - 1)\n fig = self.plot.init_figure(figsize=figsize)\n for i, neuron in enumerate(neurons):\n ax = self.plot.init_subplot('',\n tot_tup=(gridheight, gridwidth),\n sp_tup=(int(i // gridwidth), int(i % gridwidth)))\n ax.plot(gt[:, neuron], model[:, neuron], '.', alpha=0.8)\n ax.plot([0, 1], [0, 1], 'black')\n if isinstance(label, str):\n ax.text(0.65, 0.9, '%s%s' % (label, neuron), transform=ax.transAxes, ha='right',\n fontsize='small')\n fig.suptitle('Mean per Bin', y=1.0)\n fig.tight_layout()\n fig.text(0.5, 0.001, 'Expected Mean (a.u.)', ha='center')\n fig.text(0.001, 0.5, 'Generated Mean (a.u.)', va='center', rotation='vertical')\n\n def corr(self, generated, model, ax=None, marker='.', markersize=10):\n \"\"\"Plots the intrinsic correlation between neurons of\n the generated data vs. 
the groundtruth data.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n model (str): The name of the model.\n ax (object, optional): An existing axis object.\n Defaults to None which creates a new axis.\n marker (str): Defaults to '.'.\n Returns:\n object: Axis object.\n \"\"\"\n if not ax:\n ax = self.plot.init_subplot('Pairwise Total Correlation')\n corr = self.correlation(generated)\n triu_idx = np.triu_indices(n=self.n_neurons, k=1)\n within_gen = corr[:self.n_neurons, :self.n_neurons][triu_idx]\n within_gt = corr[self.n_neurons::, self.n_neurons::][triu_idx]\n vmax = np.max([within_gen.max(), within_gt.max()])\n vmin = np.max([within_gen.min(), within_gt.min()])\n ax.plot([vmin - .1, vmax + .1], [vmin - .1, vmax + .1], 'black')\n ax.plot(within_gt, within_gen, marker, label=model, markersize=markersize)\n # ax.set_xlabel('Real Correlation')\n # ax.set_ylabel('Generated Correlation')\n # ax.legend()\n return ax\n\n def noise_corr(self, generated, model, ax=None, marker='.', markersize=10):\n \"\"\"Plots the noise correlation between neurons of\n the generated data vs. the groundtruth data.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n model (str): The name of the model.\n ax (object, optional): An existing axis object.\n Defaults to None which creates a new axis.\n marker (str): Defaults to '.'.\n Returns:\n object: Axis object.\n \"\"\"\n if not ax:\n ax = self.plot.init_subplot('Pairwise Noise Correlation')\n gt_corr, gen_corr = self.signal_correlation(generated)\n\n triu_idx_tot = np.triu_indices(n=self.n_neurons, k=1)\n triu_idx_sig = np.triu_indices(n=self.n_neurons*2, k=self.n_neurons+1)\n\n tot_corr_gen = gen_corr[triu_idx_tot]\n tot_corr_gt = gt_corr[triu_idx_tot]\n\n sig_corr_gen = gen_corr[triu_idx_sig]\n sig_corr_gt = gt_corr[triu_idx_sig]\n\n noise_corr_gen = tot_corr_gen - sig_corr_gen\n noise_corr_gt = tot_corr_gt - sig_corr_gt\n\n vmax = np.max([noise_corr_gen.max(), noise_corr_gt.max()])\n vmin = np.max([noise_corr_gen.min(), noise_corr_gt.min()])\n ax.plot([vmin-.1, vmax+.1], [vmin-.1, vmax+.1], 'black')\n ax.plot(noise_corr_gt, noise_corr_gen, marker, label=model, markersize=markersize)\n # ax.set_xlabel('Real Noise Correlation')\n # ax.set_ylabel('Generated Noise Correlation')\n # ax.legend()\n return ax\n\n def signal_corr(self, generated, model, ax=None, marker='.'):\n \"\"\"Plots the signal correlation between neurons of\n the generated data vs. 
the groundtruth data.\n\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n model (str): The name of the model.\n ax (object, optional): An existing axis object.\n Defaults to None which creates a new axis.\n marker (str): Defaults to '.'.\n Returns:\n object: Axis object.\n \"\"\"\n if not ax:\n ax = self.plot.init_subplot('Pairwise Signal Correlation')\n gt_corr, gen_corr = self.signal_correlation(generated)\n\n triu_idx_sig = np.triu_indices(n=self.n_neurons*2, k=self.n_neurons+1)\n\n sig_corr_gen = gen_corr[triu_idx_sig]\n sig_corr_gt = gt_corr[triu_idx_sig]\n\n vmax = np.max([sig_corr_gen.max(), sig_corr_gt.max()])\n vmin = np.max([sig_corr_gen.min(), sig_corr_gt.min()])\n ax.plot([vmin-.1, vmax+.1], [vmin-.1, vmax+.1], 'black')\n ax.plot(sig_corr_gt, sig_corr_gen, marker, label=model, markersize=8)\n # ax.set_xlabel('Real Signal Correlation')\n # ax.set_ylabel('Generated Signal Correlation')\n # ax.legend()\n return ax\n\n def spiketrains(self, generated, neurons=[0, 1], trial_avg=False, figsize=[12, 3], aspect='auto', labels=False):\n \"\"\"Plots an overview of the spiketrains.\n Args:\n generated (array): Generated spike data\n (#repeats, #bins, #neurons)\n neurons (list, optional): List of neurons that are supposed to be plotted.\n Defaults to [0, 1]. Leave empty to plot all neurons with trial averages.\n trial_avg (bool): Useful for an overview over all neurons! Whether to average over trials.\n figsize (list): Size of the figure.\n labels (bool): Label y axis with neuron numbers.\n Returns:\n tuple of objects: (ax1, ax2)\n \"\"\"\n if not neurons:\n neurons = np.arange(0, self.n_neurons, 1)\n trial_avg = True\n labels = False\n print(\"No neurons selected. Averaging over trials to fit all neurons into the plot. 
Neuron labels were set\"\n \" off.\")\n if trial_avg:\n generated, groundtruth = self.spikes_per_bin(generated)\n generated = generated[None, ...]\n groundtruth = groundtruth[None, ...]\n n_repeats = 1\n else:\n generated = self._check(generated)\n groundtruth = self.groundtruth\n n_repeats = self.n_repeats\n\n fig = self.plot.init_figure(figsize)\n ax1 = self.plot.init_subplot('Groundtruth',\n tot_tup=(2, 1),\n sp_tup=(0, 0))\n ax2 = self.plot.init_subplot('Generated',\n tot_tup=(2, 1),\n sp_tup=(1, 0),\n sharex=ax1)\n ax2.set_xlabel('Timebins')\n ax1.set_ylabel('Neurons', labelpad=20)\n ax2.set_ylabel('Neurons', labelpad=20)\n if labels:\n ypad = 1 - 1 / (2 * (len(neurons) + 1))\n for i, neuron in enumerate(neurons):\n ax1.text(-0.01, ypad - i / len(neurons), str(neuron), transform=ax1.transAxes, ha='right', va='center')\n ax2.text(-0.01, ypad - i / len(neurons), str(neuron), transform=ax2.transAxes, ha='right', va='center')\n indices = np.tile(np.arange(0, n_repeats), len(neurons)) \\\n + np.array(neurons).repeat(n_repeats) * n_repeats\n ax1.imshow(groundtruth.transpose((2, 0, 1)).reshape(-1, self.n_bins)[indices], aspect=aspect)\n ax2.imshow(generated.transpose((2, 0, 1)).reshape(-1, self.n_bins)[indices], aspect=aspect)\n ax1 = self._rm_spines(ax1)\n ax2 = self._rm_spines(ax2)\n return ax1, ax2\n\n def _rm_spines(self, ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.set_xticks([])\n ax.set_yticks([])\n return ax\n","repo_name":"bmeatayi/neurogan","sub_path":"utils/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":18674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2501643003","text":"from itertools import combinations\nfrom itertools import permutations\nimport numpy as np\nimport pandas as pd \nimport sqlite3\nfrom sqlite3 import Error as sqlite3Error\nimport json\n \ndef sacar_combinaciones(tipos):\n # combinaciones = pd.DataFrame(columns=['Tipo1','Tipo2','num','compatibles'])\n combinaciones = tipos.drop_duplicates(subset='num', keep='first')\n combinaciones = combinaciones.reset_index(drop=True)\n combinaciones['compatibles'] = 0\n combinaciones['incompatibles'] = 0\n combinaciones['compatibles'] = combinaciones['compatibles'].astype('object')\n combinaciones['incompatibles'] = combinaciones['incompatibles'].astype('object')\n for i in range (len(combinaciones)):\n # seria_actual = combinaciones.iloc[i]\n combinaciones_compatibles = combinaciones[combinaciones['Tipo1'] != combinaciones['Tipo1'][i]].copy()\n combinaciones_compatibles = combinaciones_compatibles[combinaciones_compatibles['Tipo2'] != combinaciones['Tipo1'][i]].copy()\n combinaciones_compatibles = combinaciones_compatibles[combinaciones_compatibles['Tipo1'] != combinaciones['Tipo2'][i]].copy()\n combinaciones_compatibles = combinaciones_compatibles[combinaciones_compatibles['Tipo2'] != combinaciones['Tipo2'][i]].copy()\n combinaciones['compatibles'][i] = combinaciones_compatibles['num'].tolist()\n combinaciones_not_compatibles = np.setdiff1d(combinaciones['num'].tolist(), combinaciones['compatibles'][i])\n combinaciones['incompatibles'][i] = combinaciones_not_compatibles.tolist().copy()\n return combinaciones\n\ndef common_elements(list1, list2):\n return [element for element in list1 if element in list2]\n\ndef sacar_tipos(tipos):\n tipos['num'] = 0\n contador = 1\n for i in range (len(tipos)):\n for j in 
range (len(tipos)):\n if tipos['num'][i] == 0:\n tipos['num'][i] = contador\n contador += 1\n tipo1_aux = tipos['Tipo1'][i]\n tipo2_aux = tipos['Tipo2'][i]\n if (tipo1_aux == tipos['Tipo1'][j] and tipo2_aux == tipos['Tipo2'][j]) or (tipo1_aux == tipos['Tipo2'][j] and tipo2_aux == tipos['Tipo1'][j]):\n tipos['num'][j] = tipos['num'][i]\n return tipos\n\n# Demasiaado lento\n#def sacar_todas_combinaciones(combinaciones, todas_combinaciones):\n# n_prueba = 0\n# hechos = []\n# for n_prueba in range (len(combinaciones)):\n# n = combinaciones['num'][n_prueba]\n# posibles_actuales = combinaciones['compatibles'][n_prueba]\n# posibles_actuales = list(set(posibles_actuales) - set(hechos))\n# posibles_combinaciones = permutations(posibles_actuales)\n# for i in posibles_combinaciones:\n# lista_aux = list(i)\n# actual_final_list = lista_aux.copy()\n# for j in lista_aux:\n# if j in actual_final_list:\n# set_aux = set(actual_final_list)\n# incompatible_list_aux = combinaciones[combinaciones['num'] == j]['incompatibles'].tolist()\n# incompatible_list_aux_sin_numero = incompatible_list_aux[0].copy()\n# incompatible_list_aux_sin_numero.remove(j)\n# set_comparar = set(incompatible_list_aux_sin_numero)\n# actual_final_list= list(set_aux - set_comparar) \n# else:\n# pass\n# actual_final_list.append(combinaciones['num'][n_prueba])\n# actual_final_list.sort()\n# print (i)\n# if actual_final_list not in todas_combinaciones:\n# todas_combinaciones.append(actual_final_list)\n# print (todas_combinaciones)\n# print (i)\n# hechos.append(n)\n# #print(str(n_prueba) + '_' + str(len(posibles_combinaciones)))\n# return todas_combinaciones\n#\n\ndef sacar_todas_combinaciones(combinaciones, todas_combinaciones):\n iterador = 0\n for n in range (len(combinaciones)):\n n = combinaciones['num'][iterador]\n compatibles_base = combinaciones['compatibles'][iterador]\n for i in range (len(compatibles_base)):\n list_aux = compatibles_base.copy()\n list_aux.insert(0, list_aux.pop(i))\n for j in list_aux:\n if j in list_aux:\n set_aux = set(list_aux)\n incompatible_list_aux = combinaciones[combinaciones['num'] == j]['incompatibles'].tolist()\n incompatible_list_aux_sin_numero = incompatible_list_aux[0].copy()\n incompatible_list_aux_sin_numero.remove(j)\n set_comparar = set(incompatible_list_aux_sin_numero)\n list_aux= list(set_aux - set_comparar)\n else:\n pass\n list_aux.append(combinaciones['num'][iterador])\n list_aux.sort()\n if list_aux not in todas_combinaciones:\n todas_combinaciones.append(list_aux)\n iterador += 1\n return todas_combinaciones\n\ndef connect_to_db():\n try:\n conn = sqlite3.connect('/home/jofa/Documents/pokimones/pokemon-soullink-team-generator/PokeTeamViewer/poke_database.db')\n except sqlite3Error as e:\n print(e)\n return conn\n\ndef create_tables(conn):\n table1 = \"\"\" CREATE TABLE IF NOT EXISTS Datos_base (\n id integer PRIMARY KEY,\n Ruta text NOT NULL,\n Pokemon_1 text NOT NULL,\n Mote_1 text NOT NULL,\n Tipo_1 text NOT NULL,\n Pokemon_2 text NOT NULL,\n Mote_2 text NOT NULL,\n Tipo_2 text NOT NULL,\n Id_Tipo integer NOT NULL\n ); \"\"\"\n \n table2 = \"\"\" CREATE TABLE IF NOT EXISTS Asociacion_tipos (\n id integer PRIMARY KEY,\n Tipo_1 text NOT NULL,\n Tipo_2 text NOT NULL,\n Id_Tipo integer NOT NULL); \"\"\"\n \n table3 = \"\"\" CREATE TABLE IF NOT EXISTS Posibles_combinaciones (\n id integer PRIMARY KEY,\n Combinacion text NOT NULL,\n Cantidad integer NOT NULL); \"\"\"\n table4 = \"\"\" CREATE TABLE IF NOT EXISTS Incompatible (\n id integer PRIMARY KEY,\n Tipo integer NOT NULL,\n incompatibles 
text NOT NULL); \"\"\"\n try:\n c = conn.cursor()\n c.execute(table1)\n c.execute(table2)\n c.execute(table3)\n c.execute(table4)\n except sqlite3Error as e:\n print(e)\n\ndef fill_tables(conn, pokimones, tipos, todas_combinaciones, combinaciones):\n sql1 = \"\"\" INSERT INTO Datos_base (Ruta, Pokemon_1, Mote_1, Tipo_1, Pokemon_2, Mote_2, Tipo_2, Id_Tipo) VALUES (?, ?, ?, ?, ?, ?, ?, ?) \"\"\"\n sql2 = \"\"\" INSERT INTO Asociacion_tipos (Tipo_1, Tipo_2, Id_Tipo) VALUES (?, ?, ?) \"\"\"\n sql3 = \"\"\" INSERT INTO Posibles_combinaciones (Combinacion, Cantidad) VALUES (?, ?) \"\"\"\n sql4 = \"\"\" INSERT INTO Incompatible (Tipo, incompatibles) VALUES (?, ?) \"\"\"\n try:\n c = conn.cursor()\n for i in range (len(pokimones)):\n c.execute(sql1, (pokimones['Localizacion'][i], pokimones['Pokemon_'][i], pokimones['Mote_'][i], pokimones['Tipo_'][i], pokimones['Pokemon'][i], pokimones['Mote'][i], pokimones['Tipo'][i], int(pokimones['id_tipo'][i])))\n for i in range (len(tipos)):\n c.execute(sql2, (tipos['Tipo1'][i], tipos['Tipo2'][i], int(tipos['num'][i])))\n for i in range (len( todas_combinaciones)):\n list_element = todas_combinaciones[i]\n tamanyo = len(list_element)\n meter = json.dumps(list(map(int, list_element)))\n c.execute(sql3, (meter, tamanyo))\n for i in range (len(combinaciones)):\n list_element = combinaciones['incompatibles'][i]\n meter = json.dumps(list(map(int, list_element)))\n c.execute(sql4, (int(combinaciones['num'][i]), meter))\n conn.commit()\n \n except sqlite3Error as e:\n print(e)\n\ndef main():\n conn = None # conexion a la base de datos\n pokimones = pd.read_table('/home/jofa/Documents/pokimones/pokemon-soullink-team-generator/python/src/Bocatacas.tsv')\n entrenador1 = pd.DataFrame(columns=['Ruta','Pokemon','Tipo'])\n entrenador2 = pd.DataFrame(columns=['Ruta','Pokemon','Tipo']) \n entrenador1[['Ruta','Pokemon','Tipo']] = pokimones[['Localizacion', 'Pokemon_', 'Tipo_']].copy()\n entrenador2[['Ruta','Pokemon','Tipo']] = pokimones[['Localizacion', 'Pokemon', 'Tipo']].copy()\n tipos = pd.DataFrame(columns=['Tipo1','Tipo2','num'])\n tipos['Tipo1']= entrenador1['Tipo'].copy()\n tipos['Tipo2']= entrenador2['Tipo'].copy()\n tipos = tipos[tipos['Tipo1'] != tipos['Tipo2']]\n pokimones = pokimones[pokimones['Tipo_'] != pokimones['Tipo']]\n pokimones = pokimones.reset_index(drop=True)\n tipos = tipos.reset_index(drop=True)\n tipos = sacar_tipos(tipos)\n pokimones['id_tipo'] = tipos['num'].copy()\n combinaciones = sacar_combinaciones(tipos)\n todas_combinaciones = []\n todas_combinaciones = sacar_todas_combinaciones(combinaciones, todas_combinaciones)\n # for i in todas_combinaciones:\n # print (i)\n todas_combinaciones_df = pd.DataFrame(todas_combinaciones)\n todas_combinaciones_df.to_excel('/home/jofa/Documents/pokimones/pokemon-soullink-team-generator/python/output/todas_combinaciones.xlsx')\n conn = connect_to_db()\n create_tables(conn)\n fill_tables(conn, pokimones, tipos, todas_combinaciones, combinaciones)\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jofainita/pokemon-soullink-team-generator","sub_path":"python/equipos.py","file_name":"equipos.py","file_ext":"py","file_size_in_byte":10018,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"12260608007","text":"from django.contrib import admin\nimport redis\nfrom models import Players, PlayerSessions, LogGameEvents, PlayerAchievements, PlayerStats\n\n\nclass PlayerAdmin(admin.ModelAdmin):\n def save_model(self, request, obj, form, change):\n 
obj.save()\n redis_con = redis.StrictRedis(host='localhost', port=6379, db=0)\n redis_con.zadd('scores', obj.xp, obj.id)\n\nadmin.site.register([PlayerAchievements, PlayerStats, PlayerSessions, LogGameEvents])\nadmin.site.register(Players, admin_class=PlayerAdmin)\n","repo_name":"drednout/letspython_final_project","sub_path":"admintool/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6437597284","text":"from src import states\r\n\r\n\r\nclass Snake:\r\n def __init__(self, boundaries:list):\r\n self.body = list()\r\n self.direction = int()\r\n self.bounds = boundaries\r\n\r\n self.init()\r\n\r\n def init(self):\r\n self.direction = states.WEST\r\n\r\n def move(self):\r\n \"\"\"\r\n Adds the new head and removes the tail.\r\n\r\n Returns:\r\n list: new head (i, j)\r\n list: ols tail (i, j)\r\n \"\"\"\r\n\r\n old_head = self.body[0]\r\n i_head, j_head = [\r\n old_head[0] + self.direction[0],\r\n old_head[1] + self.direction[1],\r\n ]\r\n\r\n if i_head < 0:\r\n i_head = self.bounds[1] - 1\r\n elif i_head >= self.bounds[1]:\r\n i_head = 0\r\n\r\n if j_head < 0:\r\n j_head = self.bounds[0] - 1\r\n elif j_head >= self.bounds[0]:\r\n j_head = 0\r\n\r\n self.body.insert(0, (i_head, j_head))\r\n\r\n i_tail, j_tail = self.body.pop(-1)\r\n\r\n return (i_head, j_head), (i_tail, j_tail)\r\n\r\n def eat_tail(self):\r\n \"\"\"\r\n Returns if the snake eats its tail.\r\n \"\"\"\r\n\r\n body = set(self.body)\r\n return len(body) < len(self.body)\r\n\r\n # Magics\r\n\r\n def __getitem__(self, index):\r\n return self.body[index]\r\n","repo_name":"deplanty/mini-games","sub_path":"Snake/src/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19290395681","text":"from lofar.messaging import DEFAULT_BUSNAME, DEFAULT_BROKER, RPCException\nfrom lofar.parameterset import PyParameterValue\nfrom lofar.sas.otdb.OTDBBusListener import OTDBEventMessageHandler, OTDBBusListener\nfrom lofar.sas.otdb.otdbrpc import OTDBRPC\nfrom lofar.common import isProductionEnvironment\nfrom lofar.common.subprocess_utils import communicate_returning_strings\nfrom lofar.sas.resourceassignment.resourceassignmentservice.rpc import RADBRPC\nfrom lofar.sas.otdb.config import DEFAULT_OTDB_NOTIFICATION_SUBJECT\n\nimport subprocess\nimport pipes\nimport os\nimport re\nfrom socket import getfqdn\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n# NDPPP seems to like to have 2 cores.\nDEFAULT_NUMBER_OF_CORES_PER_TASK = 2\n# This needs to match what's in SLURM\nNUMBER_OF_NODES = 40\nNUMBER_OF_CORES_PER_NODE = 24\n# We /4 because we can then run 4 pipelines, and -2 to reserve cores for TBBwriter\nDEFAULT_NUMBER_OF_TASKS = (NUMBER_OF_NODES // 4) * (NUMBER_OF_CORES_PER_NODE - 2) // DEFAULT_NUMBER_OF_CORES_PER_TASK\n\n\ndef runCommand(cmdline, input=None):\n logger.info(\"runCommand starting: %s\", cmdline)\n\n # Start command\n proc = subprocess.Popen(\n cmdline,\n stdin=subprocess.PIPE if input else None,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n universal_newlines=True\n )\n\n # Feed input and wait for termination\n logger.debug(\"runCommand input: %s\", input)\n stdout, _ = communicate_returning_strings(proc, input)\n logger.debug(\"runCommand output: %s\", stdout)\n\n # Check exit status, bail on error\n if proc.returncode != 0:\n 
logger.warning(\"runCommand(%s) had exit status %s with output: %s\", cmdline, proc.returncode, stdout)\n raise subprocess.CalledProcessError(proc.returncode, cmdline)\n\n # Return output\n return stdout.strip()\n\n\n\"\"\" Prefix that is common to all parset keys, depending on the exact source. \"\"\"\nPARSET_PREFIX=\"ObsSW.\"\n\n\nclass Parset(dict):\n def predecessors(self):\n \"\"\" Extract the list of predecessor obs IDs from the given parset. \"\"\"\n\n key = PARSET_PREFIX + \"Observation.Scheduler.predecessors\"\n strlist = PyParameterValue(str(self[key]), True).getStringVector()\n\n # Key contains \"Lxxxxx\" values, we want to have \"xxxxx\" only\n result = [int(list(filter(str.isdigit,x))) for x in strlist]\n\n return result\n\n def isObservation(self):\n return self[PARSET_PREFIX + \"Observation.processType\"] == \"Observation\"\n\n def isPipeline(self):\n return not self.isObservation()\n\n def processingCluster(self):\n return self[PARSET_PREFIX + \"Observation.Cluster.ProcessingCluster.clusterName\"] or \"CEP2\"\n\n def processingPartition(self):\n result = self[PARSET_PREFIX + \"Observation.Cluster.ProcessingCluster.clusterPartition\"] or \"cpu\"\n if '/' in result:\n logger.error('clusterPartition contains invalid value: %s. Defaulting clusterPartition to \\'cpu\\'', result)\n return 'cpu'\n return result\n\n def processingNumberOfCoresPerTask(self):\n result = int(self[PARSET_PREFIX + \"Observation.Cluster.ProcessingCluster.numberOfCoresPerTask\"]) or None\n if not result:\n logger.warning('Invalid Observation.Cluster.ProcessingCluster.numberOfCoresPerTask: %s, defaulting to %i',\n result, DEFAULT_NUMBER_OF_CORES_PER_TASK)\n result = DEFAULT_NUMBER_OF_CORES_PER_TASK\n return result\n\n def processingNumberOfTasks(self):\n \"\"\" Parse the number of nodes to allocate from\n \"Observation.Cluster.ProcessingCluster.numberOfTasks\" \"\"\"\n\n result = int(self[PARSET_PREFIX +\n \"Observation.Cluster.ProcessingCluster.numberOfTasks\"].strip()) or None\n\n # apply bound\n if not result or result <= 0 or result > NUMBER_OF_NODES * NUMBER_OF_CORES_PER_NODE:\n logger.warning('Invalid Observation.Cluster.ProcessingCluster.numberOfTasks: %s, defaulting to %s',\n result, DEFAULT_NUMBER_OF_TASKS)\n result = DEFAULT_NUMBER_OF_TASKS\n\n return result\n\n @staticmethod\n def dockerRepository():\n return \"nexus.cep4.control.lofar:18080\"\n\n @staticmethod\n def defaultDockerImage():\n return \"lofar-pipeline\"\n\n @staticmethod\n def defaultDockerTag():\n if isProductionEnvironment():\n # \"latest\" refers to the current /production/ image\n return \"latest\"\n else:\n # test/dev environments want to use their specific version, since they\n # share images with the production environment\n return runCommand(\"docker-template\", \"${LOFAR_TAG}\")\n\n def dockerImage(self):\n # Return the version set in the parset, and fall back to our own version.\n image = self[PARSET_PREFIX + \"Observation.ObservationControl.PythonControl.softwareVersion\"]\n\n if not image:\n image = self.defaultDockerImage()\n\n if \":\" in image:\n return image\n\n # Insert our tag by default\n return \"%s:%s\" % (image, self.defaultDockerTag())\n\n def otdbId(self):\n return int(self[PARSET_PREFIX + \"Observation.otdbID\"])\n\n def description(self):\n return \"%s - %s\" % (self.get(PARSET_PREFIX + \"Observation.Campaign.name\", 'unknown'),\n self.get(PARSET_PREFIX + \"Observation.Scheduler.taskName\", 'unknown'))\n \n\nclass Slurm(object):\n def __init__(self, headnode=\"head.cep4.control.lofar\"):\n self.headnode = 
headnode\n\n def _runCommand(self, cmdline, input=None):\n cmdline = \"ssh %s %s\" % (self.headnode, cmdline)\n return runCommand(cmdline, input)\n\n def submit(self, jobName, cmdline, sbatch_params=None):\n if sbatch_params is None:\n sbatch_params = []\n\n script = \"\"\"#!/bin/bash -v\n {cmdline}\n \"\"\".format(cmdline = cmdline)\n\n stdout = self._runCommand(\"sbatch --job-name=%s %s\" % (jobName, \" \".join(sbatch_params)), script)\n\n # Returns \"Submitted batch job 3\" -- extract ID\n match = re.search(\"Submitted batch job (\\d+)\", stdout)\n if not match:\n return None\n\n return match.group(1)\n\n def cancel(self, jobName):\n self._runCommand(\"scancel --jobname %s\" % (jobName,))\n\n def isQueuedOrRunning(self, jobName):\n stdout = self._runCommand(\"sacct --starttime=2016-01-01 --noheader --parsable2 --format=jobid --name=%s --state=PENDING,CONFIGURING,RUNNING,RESIZING,COMPLETING,SUSPENDED\" % (jobName,))\n\n return stdout != \"\"\n\n\nclass PipelineDependencies(object):\n class TaskNotFoundException(Exception):\n \"\"\" Raised when a task cannot be found in the RADB. \"\"\"\n pass\n\n def __init__(self, exchange=DEFAULT_BUSNAME, broker=DEFAULT_BROKER):\n self.rarpc = RADBRPC.create(exchange=exchange, broker=broker)\n logger.info('PipelineDependencies busname=%s', exchange)\n self.otdbrpc = OTDBRPC.create(exchange=exchange, broker=broker)\n\n def open(self):\n self.rarpc.open()\n self.otdbrpc.open()\n\n def close(self):\n self.rarpc.close()\n self.otdbrpc.close()\n\n def __enter__(self):\n self.open()\n return self\n\n def __exit__(self, type, value, tb):\n self.close()\n\n def getState(self, otdb_id):\n \"\"\"\n Return the status of a single `otdb_id'.\n \"\"\"\n return self.otdbrpc.taskGetStatus(otdb_id=otdb_id)\n\n def getPredecessorStates(self, otdb_id):\n \"\"\"\n Return a dict of {\"sasid\":\"status\"} pairs of all the predecessors of `otdb_id'.\n \"\"\"\n radb_task = self.rarpc.getTask(otdb_id=otdb_id)\n\n if radb_task is None:\n raise PipelineDependencies.TaskNotFoundException(\"otdb_id %s not found in RADB\" % (otdb_id,))\n\n predecessor_radb_ids = radb_task['predecessor_ids']\n predecessor_tasks = self.rarpc.getTasks(task_ids=predecessor_radb_ids)\n\n #get states from otdb in order to prevent race conditions between states in radb/otdb\n predecessor_otdb_ids = [t[\"otdb_id\"] for t in predecessor_tasks]\n predecessor_states = { otdb_id:self.getState(otdb_id) for otdb_id in predecessor_otdb_ids }\n\n logger.debug(\"getPredecessorStates(%s) = %s\", otdb_id, predecessor_states)\n\n return predecessor_states\n\n def getSuccessorIds(self, otdb_id):\n \"\"\"\n Return a list of all the successors of `otdb_id'.\n \"\"\"\n radb_task = self.rarpc.getTask(otdb_id=otdb_id)\n\n if radb_task is None:\n raise PipelineDependencies.TaskNotFoundException(\"otdb_id %s not found in RADB\" % (otdb_id,))\n\n successor_radb_ids = radb_task['successor_ids']\n successor_tasks = self.rarpc.getTasks(task_ids=successor_radb_ids) if successor_radb_ids else []\n successor_otdb_ids = [t[\"otdb_id\"] for t in successor_tasks]\n\n logger.debug(\"getSuccessorIds(%s) = %s\", otdb_id, successor_otdb_ids)\n\n return successor_otdb_ids\n\n def canStart(self, otdbId):\n \"\"\"\n Return whether `otdbId' can start, according to the status of the predecessors\n and its own status.\n \"\"\"\n\n try:\n myState = self.getState(otdbId)\n predecessorStates = self.getPredecessorStates(otdbId)\n except PipelineDependencies.TaskNotFoundException as e:\n logger.error(\"canStart(%s): Error obtaining task states, not 
starting pipeline: %s\", otdbId, e)\n return False\n\n startable = (myState == \"scheduled\" and all([x == \"finished\" for x in list(predecessorStates.values())]))\n logger.info(\"canStart(%s)? state = %s, predecessors = %s, canStart = %s\", otdbId, myState, predecessorStates, startable)\n return startable\n\n def getTasks(self, task_status, task_type):\n return self.rarpc.getTasks(task_status=task_status, task_type=task_type)\n\n\nclass PipelineControlHandler( OTDBEventMessageHandler):\n def __init__(self, exchange, broker):\n super(PipelineControlHandler, self).__init__()\n\n logger.info('PipelineControl busname=%s', exchange)\n self.exchange = exchange\n self.otdbrpc = OTDBRPC.create(exchange=exchange, broker=broker)\n self.dependencies = PipelineDependencies(exchange=exchange, broker=broker)\n self.slurm = Slurm()\n\n def _setStatus(self, otdb_id, status):\n self.otdbrpc.taskSetStatus(otdb_id=otdb_id, new_status=status)\n\n def _getParset(self, otdbId):\n try:\n return Parset(self.otdbrpc.taskGetSpecification(otdb_id=otdbId)[\"specification\"])\n except RPCException as e:\n # Parset not in OTDB, probably got deleted\n logger.error(\"Cannot retrieve parset of task %s: %s\", otdbId, e)\n return None\n\n def start_handling(self):\n self.otdbrpc.open()\n self.dependencies.open()\n\n super(PipelineControlHandler, self).start_handling()\n\n def stop_handling(self):\n super(PipelineControlHandler, self).stop_handling()\n\n self.dependencies.close()\n self.otdbrpc.close()\n\n def check_scheduled_pipelines(self):\n try:\n logger.info(\"Checking for already scheduled pipelines...\")\n\n scheduled_pipelines = self.dependencies.getTasks(task_status='scheduled',\n task_type='pipeline')\n logger.info(\"Checking %s scheduled pipelines if they can start.\",\n len(scheduled_pipelines))\n\n for pipeline in scheduled_pipelines:\n logger.info(\"Checking if scheduled pipeline otdbId=%s can start.\",\n pipeline['otdb_id'])\n try:\n otdbId = pipeline['otdb_id']\n parset = self._getParset(otdbId)\n if not parset or not self._shouldHandle(parset):\n continue\n\n # Maybe the pipeline can start already\n if self.dependencies.canStart(otdbId):\n self._startPipeline(otdbId, parset)\n else:\n logger.info(\"Job %s was set to scheduled, but cannot start yet.\", otdbId)\n except Exception as e:\n logger.error(e)\n except Exception as e:\n logger.error(e)\n\n logger.info(\"...finished checking for already scheduled pipelines.\")\n\n @staticmethod\n def _shouldHandle(parset):\n try:\n if not parset.isPipeline():\n logger.info(\"Not processing tree: is not a pipeline\")\n return False\n\n if parset.processingCluster() == \"CEP2\":\n logger.info(\"Not processing tree: is a CEP2 pipeline\")\n return False\n except KeyError as e:\n # Parset not complete\n logger.error(\"Parset incomplete, ignoring: %s\", e)\n return False\n\n return True\n\n @staticmethod\n def _jobName(otdbId):\n return str(otdbId)\n\n def _startPipeline(self, otdbId, parset):\n \"\"\"\n Schedule \"docker-runPipeline.sh\", which will fetch the parset and run the pipeline within\n a SLURM job.\n \"\"\"\n\n # Avoid race conditions by checking whether we haven't already sent the job\n # to SLURM. 
Our QUEUED status update may still be being processed.\n if self.slurm.isQueuedOrRunning(otdbId):\n logger.info(\"Pipeline %s is already queued or running in SLURM.\", otdbId)\n return\n\n logger.info(\"***** START Otdb ID %s *****\", otdbId)\n\n # Determine SLURM parameters\n sbatch_params = [\n # Only run job if all nodes are ready\n \"--wait-all-nodes=1\",\n\n # Enforce the dependencies, instead of creating lingering jobs\n \"--kill-on-invalid-dep=yes\",\n\n # Annotate the job\n \"--comment=%s\" % pipes.quote(pipes.quote(parset.description())),\n\n # Lower priority to drop below inspection plots\n \"--nice=1000\",\n\n \"--partition=%s\" % parset.processingPartition(),\n \"--ntasks=%s\" % parset.processingNumberOfTasks(),\n \"--cpus-per-task=%s\" % parset.processingNumberOfCoresPerTask(),\n\n # Define better places to write the output\n os.path.expandvars(\"--output=/data/log/pipeline-%s-%%j.log\" % (otdbId,)),\n ]\n\n def setStatus_cmdline(status):\n return (\n \"ssh {myhostname} '\"\n \"source {lofarroot}/lofarinit.sh && \"\n \"setOTDBTreeStatus -o {obsid} -s {status} -B {busname}\"\n \"'\"\n .format(\n myhostname = getfqdn(),\n lofarroot = os.environ.get(\"LOFARROOT\", \"\"),\n obsid = otdbId,\n status = status,\n busname = self.exchange,\n ))\n\n def getParset_cmdline():\n return (\n \"ssh {myhostname} '\"\n \"source {lofarroot}/lofarinit.sh && \"\n \"getOTDBParset -o {obsid}'\"\n .format(\n myhostname = getfqdn(),\n lofarroot = os.environ.get(\"LOFARROOT\", \"\"),\n obsid = otdbId,\n ))\n\n\n try:\n logger.info(\"Handing over pipeline %s to SLURM\", otdbId)\n\n # Schedule runPipeline.sh\n slurm_job_id = self.slurm.submit(self._jobName(otdbId),\n \"\"\"\n # Run a command, but propagate SIGINT and SIGTERM\n function runcmd {{\n trap 'kill -s SIGTERM $PID' SIGTERM\n trap 'kill -s SIGINT $PID' SIGINT\n \n \"$@\" &\n PID=$!\n wait $PID # returns the exit status of \"wait\" if interrupted\n wait $PID # returns the exit status of $PID\n CMDRESULT=$?\n \n trap - SIGTERM SIGINT\n \n return $CMDRESULT\n }}\n \n # print some info\n echo Running on $SLURM_NODELIST\n \n # notify OTDB that we're running\n runcmd {setStatus_active}\n \n # notify ganglia\n wget -O - -q \"http://ganglia.control.lofar/ganglia/api/events.php?action=add&start_time=now&summary=Pipeline {obsid} ACTIVE&host_regex=\"\n\n # fetch parset\n runcmd {getParset} > {parset_file}\n \n # run the pipeline\n runcmd docker-run-slurm.sh --rm --net=host \\\n -e LOFARENV={lofarenv} \\\n -v $HOME/.ssh:$HOME/.ssh:ro \\\n -e SLURM_JOB_ID=$SLURM_JOB_ID \\\n -v /data:/data \\\n {image} \\\n runPipeline.sh -o {obsid} -c /opt/lofar/share/pipeline/pipeline.cfg.{cluster} -P {parset_dir} -p {parset_file}\n RESULT=$?\n \n # notify that we're tearing down\n runcmd {setStatus_completing}\n \n if [ $RESULT -eq 0 ]; then\n # wait for MoM to pick up feedback before we set finished status\n # AS: I increased this to 300 sec to be in line with the wait time after observation finished\n # and because we still note quite a lot of feedback issues in MoM\n runcmd sleep 300\n \n # if we reached this point, the pipeline ran succesfully\n runcmd {setStatus_finished}\n \n # notify ganglia\n wget -O - -q \"http://ganglia.control.lofar/ganglia/api/events.php?action=add&start_time=now&summary=Pipeline {obsid} FINISHED&host_regex=\"\n else\n # If we are killed by the pipeline being set to aborted, we just went from aborted->completing\n # but our abort_trigger may already have been cancelled. 
Set the status here too to avoid lingering\n # in completing\n runcmd {setStatus_aborted}\n fi\n \n # report status back to SLURM\n echo \"Pipeline exited with status $RESULT\"\n exit $RESULT\n \"\"\".format(\n lofarenv = os.environ.get(\"LOFARENV\", \"\"),\n obsid = otdbId,\n parset_dir = \"/data/parsets\",\n parset_file = \"/data/parsets/Observation%s.parset\" % (otdbId,),\n repository = parset.dockerRepository(),\n image = parset.dockerImage(),\n cluster = parset.processingCluster(),\n\n getParset = getParset_cmdline(),\n setStatus_active = setStatus_cmdline(\"active\"),\n setStatus_completing = setStatus_cmdline(\"completing\"),\n setStatus_finished = setStatus_cmdline(\"finished\"),\n setStatus_aborted = setStatus_cmdline(\"aborted\"),\n ),\n\n sbatch_params=sbatch_params\n )\n logger.info(\"Scheduled SLURM job %s for otdb_id=%s\", slurm_job_id, otdbId)\n\n # Schedule pipelineAborted.sh\n logger.info(\"Scheduling SLURM job for pipelineAborted.sh\")\n slurm_cancel_job_id = self.slurm.submit(\"%s-abort-trigger\" % self._jobName(otdbId),\n \"\"\"\n # notify OTDB\n {setStatus_aborted}\n \n # notify ganglia\n wget -O - -q \"http://ganglia.control.lofar/ganglia/api/events.php?action=add&start_time=now&summary=Pipeline {obsid} ABORTED&host_regex=\"\n \"\"\"\n .format(\n setStatus_aborted = setStatus_cmdline(\"aborted\"),\n obsid = otdbId,\n ),\n\n sbatch_params=[\n \"--partition=%s\" % parset.processingPartition(),\n \"--cpus-per-task=1\",\n \"--ntasks=1\",\n \"--dependency=afternotok:%s\" % slurm_job_id,\n \"--kill-on-invalid-dep=yes\",\n \"--requeue\",\n \"--output=/data/log/abort-trigger-%s.log\" % (otdbId,),\n ]\n )\n logger.info(\"Scheduled SLURM job %s for abort trigger for otdb_id=%s\", slurm_cancel_job_id, otdbId)\n\n logger.info(\"Handed over pipeline %s to SLURM, setting status to QUEUED\", otdbId)\n self._setStatus(otdbId, \"queued\")\n except Exception as e:\n logger.error(str(e))\n self._setStatus(otdbId, \"aborted\")\n\n def _stopPipeline(self, otdbId):\n # Cancel corresponding SLURM job, but first the abort-trigger\n # to avoid setting ABORTED as a side effect.\n # to be cancelled as well.\n\n if not self.slurm.isQueuedOrRunning(otdbId):\n logger.info(\"_stopPipeline: Job %s not running\", otdbId)\n return\n\n def cancel(jobName):\n logger.info(\"Cancelling job %s\", jobName)\n self.slurm.cancel(jobName)\n\n jobName = self._jobName(otdbId)\n cancel(\"%s-abort-trigger\" % jobName)\n cancel(jobName)\n\n def _startSuccessors(self, otdbId):\n try:\n successor_ids = self.dependencies.getSuccessorIds(otdbId)\n except PipelineDependencies.TaskNotFoundException as e:\n logger.error(\"_startSuccessors(%s): Error obtaining task successors, not starting them: %s\", otdbId, e)\n return\n\n for s in successor_ids:\n parset = self._getParset(s)\n if not parset or not self._shouldHandle(parset):\n continue\n\n if self.dependencies.canStart(s):\n self._startPipeline(s, parset)\n else:\n logger.info(\"Job %s still cannot start yet.\", otdbId)\n\n def onObservationScheduled(self, otdbId, modificationTime):\n parset = self._getParset(otdbId)\n if not parset or not self._shouldHandle(parset):\n return\n\n # Maybe the pipeline can start already\n if self.dependencies.canStart(otdbId):\n self._startPipeline(otdbId, parset)\n else:\n logger.info(\"Job %s was set to scheduled, but cannot start yet.\", otdbId)\n\n def onObservationFinished(self, otdbId, modificationTime):\n \"\"\" Check if any successors can now start. 
\"\"\"\n\n logger.info(\"Considering to start successors of %s\", otdbId)\n self._startSuccessors(otdbId)\n\n def onObservationAborted(self, otdbId, modificationTime):\n parset = self._getParset(otdbId)\n if parset and not self._shouldHandle(parset): # stop jobs even if there's no parset\n return\n\n logger.info(\"***** STOP Otdb ID %s *****\", otdbId)\n self._stopPipeline(otdbId)\n\n \"\"\"\n More statusses we want to abort on.\n \"\"\"\n onObservationDescribed = onObservationAborted\n onObservationApproved = onObservationAborted\n onObservationPrescheduled = onObservationAborted\n onObservationConflict = onObservationAborted\n onObservationHold = onObservationAborted\n\n\nclass PipelineControl(OTDBBusListener):\n \"\"\"The OTDBBusListener is a normal BusListener listening specifically to EventMessages with OTDB notification subjects.\n It uses by default the OTDBEventMessageHandler to handle the EventMessages.\n If you want to implement your own behaviour, then derive a subclass of the OTDBEventMessageHandler, and inject that in this OTDBBusListener.\n See example at the top of this file.\n \"\"\"\n def __init__(self, handler_type: PipelineControlHandler.__class__ = PipelineControlHandler,\n handler_kwargs: dict = None,\n exchange: str = DEFAULT_BUSNAME, broker: str = DEFAULT_BROKER,\n num_threads: int = 1):\n if not issubclass(handler_type, PipelineControlHandler):\n raise TypeError(\"handler_type should be a PipelineControlHandler subclass\")\n\n if handler_kwargs is None:\n handler_kwargs = {\"exchange\": exchange, \"broker\": broker}\n\n super().__init__(handler_type=handler_type, handler_kwargs=handler_kwargs,\n exchange=exchange,\n num_threads=num_threads, broker=broker)\n\n def start_listening(self):\n # HACK: create a temporary extra handler which is not connected to this listener,\n # and hence not responding to incoming messages,\n # and use this extra handler to initially check all already scheduled pipelines\n with self._create_handler() as helper_handler:\n helper_handler.check_scheduled_pipelines()\n\n # everything has been check, now start_listening, and let the normal handlers respond to otdb events\n super().start_listening()\n\n\n\n\n\n","repo_name":"kernsuite-debian/lofar","sub_path":"MAC/Services/src/PipelineControl.py","file_name":"PipelineControl.py","file_ext":"py","file_size_in_byte":24199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3182253264","text":"menu = {\n \"Baja Taco\": 4.00,\n \"Burrito\": 7.50,\n \"Bowl\": 8.50,\n \"Nachos\": 11.00,\n \"Quesadilla\": 8.50,\n \"Super Burrito\": 8.50,\n \"Super Quesadilla\": 9.50,\n \"Taco\": 3.00,\n \"Tortilla Salad\": 8.00\n}\n\n\ndef order():\n order_total = 0.0\n try:\n while True:\n item = input(\"Item: \").title()\n if not item:\n break\n if item in menu:\n price = menu[item]\n order_total += price\n print(f\"Total: ${order_total:.2f}\")\n else:\n print(\"Invalid item\")\n except EOFError:\n pass\n print(f\"Total is ${order_total:.2f}\")\n\norder()","repo_name":"marko-gacic/CS50P","sub_path":"taqueria/taqueria.py","file_name":"taqueria.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18418558605","text":"import pdb\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn import Sequential, Linear, ReLU, Dropout\nfrom torch_geometric.nn import global_mean_pool\nclass Classifier(nn.Module):\n def __init__(self):\n 
super(Classifier,self).__init__()\n self.dropout = 0.1\n self.layers = Sequential(\n Linear(66,256),\n ReLU(), \n Linear(256,64)\n )\n self.lin2 = Linear(64, 1)\n def forward(self, dataBA,num_nodes, num_edges, start_node, gid, checkStatus):\n '''Forward pass'''\n x,batch = dataBA.x.float(),dataBA.batch.to(dataBA.x.device)\n x = self.layers(x)\n x = F.relu(x)\n #mean pooling batch\n x = global_mean_pool(x, batch)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.lin2(x)\n return x\n","repo_name":"prasitaGit/OCTAL_Code","sub_path":"mlpExp.py","file_name":"mlpExp.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
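A minimal smoke test for the Classifier in the record above, assuming torch and torch_geometric are installed; the toy graphs, the 66-wide feature matrices, and the None placeholders for forward()'s unused arguments are illustrative choices of mine, not part of the dataset:

import torch
from torch_geometric.data import Batch, Data

model = Classifier()
# Two toy graphs with 5 and 8 nodes; only node features are needed, since
# forward() reads dataBA.x and dataBA.batch and never touches the edges.
batch = Batch.from_data_list([Data(x=torch.randn(5, 66)), Data(x=torch.randn(8, 66))])
out = model(batch, num_nodes=None, num_edges=None, start_node=None, gid=None, checkStatus=None)
print(out.shape)  # torch.Size([2, 1]) -- one logit per graph after global mean pooling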