diff --git "a/984.jsonl" "b/984.jsonl" new file mode 100644--- /dev/null +++ "b/984.jsonl" @@ -0,0 +1,1938 @@ +{"seq_id":"74853211433","text":"import openpyxl\r\n\r\ndef escribir_archivo_excel(file_path):\r\n workbook = openpyxl.Workbook()\r\n sheet = workbook.active\r\n\r\n # Agregar encabezado\r\n sheet[\"A1\"] = \"ID_Rol\"\r\n sheet[\"B1\"] = \"Rol\"\r\n sheet[\"C1\"] = \"Valor_Jornada\"\r\n sheet[\"D1\"] = \"Horas_Jornada\"\r\n sheet[\"E1\"] = \"Valor_Hora\"\r\n\r\n # Agregar datos de ejemplo\r\n data = [\r\n (1, \"Productor\", 13000, 8),\r\n (2, \"Camarógrafo\", 27000, 12),\r\n (3, \"Editor\", 10000, 10),\r\n (4, \"Sonidista\", 10500,3)\r\n\r\n # Agrega aquí tus datos adicionales\r\n ]\r\n\r\n for row_idx, row_data in enumerate(data, start=2):\r\n for col_idx, cell_value in enumerate(row_data, start=1):\r\n sheet.cell(row = row_idx, column = col_idx, value = cell_value)\r\n sheet.cell(row = row_idx, column = 5, value = row_data[2] / row_data[3])\r\n \r\n #Vuelca data al excel\r\n\r\n\r\n\r\n\r\n # Guardar los cambios y cerrar el archivo\r\n workbook.save(file_path)\r\n workbook.close()\r\n\r\n\r\ndef leer_archivo_excel(file_path):\r\n workbook = openpyxl.load_workbook(file_path)\r\n sheet = workbook.active\r\n\r\n # Leer datos del archivo y mostrarlos en consola\r\n for row in sheet.iter_rows(values_only=True):\r\n print(row)\r\n\r\n workbook.close()\r\n\r\nif __name__ == \"__main__\":\r\n file_path = \"mandingues/excels/horas_hombre.xlsx\"\r\n\r\n # Escribir datos en el archivo\r\n escribir_archivo_excel(file_path)\r\n\r\n # Leer datos del archivo\r\n leer_archivo_excel(file_path)\r\n","repo_name":"JVseawolf/Presupuestador","sub_path":"horas_hombre.py","file_name":"horas_hombre.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24156605404","text":"from django.urls import path\n\nfrom .views import (\n entities,\n entity_children,\n entity_types,\n)\n\n\napp_name = 'entities'\n\nurlpatterns = [\n path('', entities.EntityViewSet.as_view({'post': 'create', 'get': 'list'}), name='entities'),\n path('', entities.EntityViewSet.as_view({'get': 'retrieve', 'delete': 'delete'}), name='entity-detail'),\n path('types', entity_types.EntityTypesApiView.as_view(), name='get_types'),\n path('/children', entity_children.EntityChildrenApiView.as_view(), name='entity_children'),\n]\n","repo_name":"ZaRqax/src","sub_path":"core/api/entities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31119479418","text":"#@markdown ####**Models Settings (note: For pixel art, the best is pixelartdiffusion_expanded):**\ndiffusion_model = \"512x512_diffusion_uncond_finetune_008100\" #@param [\"256x256_diffusion_uncond\", \"512x512_diffusion_uncond_finetune_008100\", \"portrait_generator_v001\", \"pixelartdiffusion_expanded\", \"pixel_art_diffusion_hard_256\", \"pixel_art_diffusion_soft_256\", \"pixelartdiffusion4k\", \"watercolordiffusion_2\", \"watercolordiffusion\", \"PulpSciFiDiffusion\", \"custom\"]\n\nuse_secondary_model = True #@param {type: 'boolean'}\ndiffusion_sampling_mode = 'ddim' #@param ['plms','ddim']\n#@markdown #####**Custom model:**\ncustom_path = '/content/drive/MyDrive/deep_learning/ddpm/ema_0.9999_058000.pt'#@param {type: 'string'}\n\n#@markdown #####**CLIP settings:**\nuse_checkpoint = True #@param {type: 'boolean'}\nViTB32 = True 
#@param{type:\"boolean\"}\nViTB16 = True #@param{type:\"boolean\"}\nViTL14 = False #@param{type:\"boolean\"}\nViTL14_336px = False #@param{type:\"boolean\"}\nRN101 = False #@param{type:\"boolean\"}\nRN50 = True #@param{type:\"boolean\"}\nRN50x4 = False #@param{type:\"boolean\"}\nRN50x16 = False #@param{type:\"boolean\"}\nRN50x64 = False #@param{type:\"boolean\"}\n\n#@markdown #####**OpenCLIP settings:**\nViTB32_laion2b_e16 = False #@param{type:\"boolean\"}\nViTB32_laion400m_e31 = False #@param{type:\"boolean\"}\nViTB32_laion400m_32 = False #@param{type:\"boolean\"}\nViTB32quickgelu_laion400m_e31 = False #@param{type:\"boolean\"}\nViTB32quickgelu_laion400m_e32 = False #@param{type:\"boolean\"}\nViTB16_laion400m_e31 = False #@param{type:\"boolean\"}\nViTB16_laion400m_e32 = False #@param{type:\"boolean\"}\nRN50_yffcc15m = False #@param{type:\"boolean\"}\nRN50_cc12m = False #@param{type:\"boolean\"}\nRN50_quickgelu_yfcc15m = False #@param{type:\"boolean\"}\nRN50_quickgelu_cc12m = False #@param{type:\"boolean\"}\nRN101_yfcc15m = False #@param{type:\"boolean\"}\nRN101_quickgelu_yfcc15m = False #@param{type:\"boolean\"}\n\n#@markdown If you're having issues with model downloads, check this to compare SHA's:\ncheck_model_SHA = False #@param{type:\"boolean\"}\n\ndiff_model_map = {\n '256x256_diffusion_uncond': { 'downloaded': False, 'sha': 'a37c32fffd316cd494cf3f35b339936debdc1576dad13fe57c42399a5dbc78b1', 'uri_list': ['https://openaipublic.blob.core.windows.net/diffusion/jul-2021/256x256_diffusion_uncond.pt', 'https://www.dropbox.com/s/9tqnqo930mpnpcn/256x256_diffusion_uncond.pt'] },\n '512x512_diffusion_uncond_finetune_008100': { 'downloaded': False, 'sha': '9c111ab89e214862b76e1fa6a1b3f1d329b1a88281885943d2cdbe357ad57648', 'uri_list': ['https://huggingface.co/lowlevelware/512x512_diffusion_unconditional_ImageNet/resolve/main/512x512_diffusion_uncond_finetune_008100.pt', 'https://the-eye.eu/public/AI/models/512x512_diffusion_unconditional_ImageNet/512x512_diffusion_uncond_finetune_008100.pt'] },\n 'portrait_generator_v001': { 'downloaded': False, 'sha': 'b7e8c747af880d4480b6707006f1ace000b058dd0eac5bb13558ba3752d9b5b9', 'uri_list': ['https://huggingface.co/felipe3dartist/portrait_generator_v001/resolve/main/portrait_generator_v001_ema_0.9999_1MM.pt'] },\n 'pixelartdiffusion_expanded': { 'downloaded': False, 'sha': 'a73b40556634034bf43b5a716b531b46fb1ab890634d854f5bcbbef56838739a', 'uri_list': ['https://huggingface.co/KaliYuga/PADexpanded/resolve/main/PADexpanded.pt'] },\n 'pixel_art_diffusion_hard_256': { 'downloaded': False, 'sha': 'be4a9de943ec06eef32c65a1008c60ad017723a4d35dc13169c66bb322234161', 'uri_list': ['https://huggingface.co/KaliYuga/pixel_art_diffusion_hard_256/resolve/main/pixel_art_diffusion_hard_256.pt'] },\n 'pixel_art_diffusion_soft_256': { 'downloaded': False, 'sha': 'd321590e46b679bf6def1f1914b47c89e762c76f19ab3e3392c8ca07c791039c', 'uri_list': ['https://huggingface.co/KaliYuga/pixel_art_diffusion_soft_256/resolve/main/pixel_art_diffusion_soft_256.pt'] },\n 'pixelartdiffusion4k': { 'downloaded': False, 'sha': 'a1ba4f13f6dabb72b1064f15d8ae504d98d6192ad343572cc416deda7cccac30', 'uri_list': ['https://huggingface.co/KaliYuga/pixelartdiffusion4k/resolve/main/pixelartdiffusion4k.pt'] },\n 'watercolordiffusion_2': { 'downloaded': False, 'sha': '49c281b6092c61c49b0f1f8da93af9b94be7e0c20c71e662e2aa26fee0e4b1a9', 'uri_list': ['https://huggingface.co/KaliYuga/watercolordiffusion_2/resolve/main/watercolordiffusion_2.pt'] },\n 'watercolordiffusion': { 'downloaded': False, 'sha': 
'a3e6522f0c8f278f90788298d66383b11ac763dd5e0d62f8252c962c23950bd6', 'uri_list': ['https://huggingface.co/KaliYuga/watercolordiffusion/resolve/main/watercolordiffusion.pt'] },\n 'PulpSciFiDiffusion': { 'downloaded': False, 'sha': 'b79e62613b9f50b8a3173e5f61f0320c7dbb16efad42a92ec94d014f6e17337f', 'uri_list': ['https://huggingface.co/KaliYuga/PulpSciFiDiffusion/resolve/main/PulpSciFiDiffusion.pt'] },\n 'secondary': { 'downloaded': False, 'sha': '983e3de6f95c88c81b2ca7ebb2c217933be1973b1ff058776b970f901584613a', 'uri_list': ['https://huggingface.co/spaces/huggi/secondary_model_imagenet_2.pth/resolve/main/secondary_model_imagenet_2.pth', 'https://the-eye.eu/public/AI/models/v-diffusion/secondary_model_imagenet_2.pth', 'https://ipfs.pollinations.ai/ipfs/bafybeibaawhhk7fhyhvmm7x24zwwkeuocuizbqbcg5nqx64jq42j75rdiy/secondary_model_imagenet_2.pth'] },\n}\n\nkaliyuga_pixel_art_model_names = ['pixelartdiffusion_expanded', 'pixel_art_diffusion_hard_256', 'pixel_art_diffusion_soft_256', 'pixelartdiffusion4k', 'PulpSciFiDiffusion']\nkaliyuga_watercolor_model_names = ['watercolordiffusion', 'watercolordiffusion_2']\nkaliyuga_pulpscifi_model_names = ['PulpSciFiDiffusion']\ndiffusion_models_256x256_list = ['256x256_diffusion_uncond'] + kaliyuga_pixel_art_model_names + kaliyuga_watercolor_model_names + kaliyuga_pulpscifi_model_names\n\nfrom urllib.parse import urlparse\n\ndef get_model_filename(diffusion_model_name):\n model_uri = diff_model_map[diffusion_model_name]['uri_list'][0]\n model_filename = os.path.basename(urlparse(model_uri).path)\n return model_filename\n\n\ndef download_model(diffusion_model_name, uri_index=0):\n if diffusion_model_name != 'custom':\n model_filename = get_model_filename(diffusion_model_name)\n model_local_path = os.path.join(model_path, model_filename)\n if os.path.exists(model_local_path) and check_model_SHA:\n print(f'Checking {diffusion_model_name} File')\n with open(model_local_path, \"rb\") as f:\n bytes = f.read() \n hash = hashlib.sha256(bytes).hexdigest()\n if hash == diff_model_map[diffusion_model_name]['sha']:\n print(f'{diffusion_model_name} SHA matches')\n diff_model_map[diffusion_model_name]['downloaded'] = True\n else:\n print(f\"{diffusion_model_name} SHA doesn't match. Will redownload it.\")\n elif os.path.exists(model_local_path) and not check_model_SHA or diff_model_map[diffusion_model_name]['downloaded']:\n print(f'{diffusion_model_name} already downloaded. If the file is corrupt, enable check_model_SHA.')\n diff_model_map[diffusion_model_name]['downloaded'] = True\n\n if not diff_model_map[diffusion_model_name]['downloaded']:\n for model_uri in diff_model_map[diffusion_model_name]['uri_list']:\n wget(model_uri, model_path)\n if os.path.exists(model_local_path):\n diff_model_map[diffusion_model_name]['downloaded'] = True\n return\n else:\n print(f'{diffusion_model_name} model download from {model_uri} failed. 
Will try any fallback uri.')\n print(f'{diffusion_model_name} download failed.')\n\n\n# Download the diffusion model(s)\ndownload_model(diffusion_model)\nif use_secondary_model:\n download_model('secondary')\n\nmodel_config = model_and_diffusion_defaults()\nif diffusion_model == '512x512_diffusion_uncond_finetune_008100':\n model_config.update({\n 'attention_resolutions': '32, 16, 8',\n 'class_cond': False,\n 'diffusion_steps': 1000, #No need to edit this, it is taken care of later.\n 'rescale_timesteps': True,\n 'timestep_respacing': 250, #No need to edit this, it is taken care of later.\n 'image_size': 512,\n 'learn_sigma': True,\n 'noise_schedule': 'linear',\n 'num_channels': 256,\n 'num_head_channels': 64,\n 'num_res_blocks': 2,\n 'resblock_updown': True,\n 'use_checkpoint': use_checkpoint,\n 'use_fp16': not useCPU,\n 'use_scale_shift_norm': True,\n })\nelif diffusion_model == '256x256_diffusion_uncond':\n model_config.update({\n 'attention_resolutions': '32, 16, 8',\n 'class_cond': False,\n 'diffusion_steps': 1000, #No need to edit this, it is taken care of later.\n 'rescale_timesteps': True,\n 'timestep_respacing': 250, #No need to edit this, it is taken care of later.\n 'image_size': 256,\n 'learn_sigma': True,\n 'noise_schedule': 'linear',\n 'num_channels': 256,\n 'num_head_channels': 64,\n 'num_res_blocks': 2,\n 'resblock_updown': True,\n 'use_checkpoint': use_checkpoint,\n 'use_fp16': not useCPU,\n 'use_scale_shift_norm': True,\n })\nelif diffusion_model == 'portrait_generator_v001':\n model_config.update({\n 'attention_resolutions': '32, 16, 8',\n 'class_cond': False,\n 'diffusion_steps': 1000,\n 'rescale_timesteps': True,\n 'image_size': 512,\n 'learn_sigma': True,\n 'noise_schedule': 'linear',\n 'num_channels': 128,\n 'num_heads': 4,\n 'num_res_blocks': 2,\n 'resblock_updown': True,\n 'use_checkpoint': use_checkpoint,\n 'use_fp16': True,\n 'use_scale_shift_norm': True,\n })\nelse: # E.g. 
A model finetuned by KaliYuga\n model_config.update({\n 'attention_resolutions': '16',\n 'class_cond': False,\n 'diffusion_steps': 1000,\n 'rescale_timesteps': True,\n 'timestep_respacing': 'ddim100',\n 'image_size': 256,\n 'learn_sigma': True,\n 'noise_schedule': 'linear',\n 'num_channels': 128,\n 'num_heads': 1,\n 'num_res_blocks': 2,\n 'use_checkpoint': use_checkpoint,\n 'use_fp16': True,\n 'use_scale_shift_norm': False,\n })\n\nmodel_default = model_config['image_size']\n\nif use_secondary_model:\n secondary_model = SecondaryDiffusionImageNet2()\n secondary_model.load_state_dict(torch.load(f'{model_path}/secondary_model_imagenet_2.pth', map_location='cpu'))\n secondary_model.eval().requires_grad_(False).to(device)\n\nclip_models = []\nif ViTB32: clip_models.append(clip.load('ViT-B/32', jit=False)[0].eval().requires_grad_(False).to(device))\nif ViTB16: clip_models.append(clip.load('ViT-B/16', jit=False)[0].eval().requires_grad_(False).to(device))\nif ViTL14: clip_models.append(clip.load('ViT-L/14', jit=False)[0].eval().requires_grad_(False).to(device))\nif ViTL14_336px: clip_models.append(clip.load('ViT-L/14@336px', jit=False)[0].eval().requires_grad_(False).to(device))\nif RN50: clip_models.append(clip.load('RN50', jit=False)[0].eval().requires_grad_(False).to(device))\nif RN50x4: clip_models.append(clip.load('RN50x4', jit=False)[0].eval().requires_grad_(False).to(device))\nif RN50x16: clip_models.append(clip.load('RN50x16', jit=False)[0].eval().requires_grad_(False).to(device))\nif RN50x64: clip_models.append(clip.load('RN50x64', jit=False)[0].eval().requires_grad_(False).to(device))\nif RN101: clip_models.append(clip.load('RN101', jit=False)[0].eval().requires_grad_(False).to(device))\nif ViTB32_laion2b_e16: clip_models.append(open_clip.create_model('ViT-B-32', pretrained='laion2b_e16').eval().requires_grad_(False).to(device))\nif ViTB32_laion400m_e31: clip_models.append(open_clip.create_model('ViT-B-32', pretrained='laion400m_e31').eval().requires_grad_(False).to(device))\nif ViTB32_laion400m_32: clip_models.append(open_clip.create_model('ViT-B-32', pretrained='laion400m_e32').eval().requires_grad_(False).to(device))\nif ViTB32quickgelu_laion400m_e31: clip_models.append(open_clip.create_model('ViT-B-32-quickgelu', pretrained='laion400m_e31').eval().requires_grad_(False).to(device))\nif ViTB32quickgelu_laion400m_e32: clip_models.append(open_clip.create_model('ViT-B-32-quickgelu', pretrained='laion400m_e32').eval().requires_grad_(False).to(device))\nif ViTB16_laion400m_e31: clip_models.append(open_clip.create_model('ViT-B-16', pretrained='laion400m_e31').eval().requires_grad_(False).to(device))\nif ViTB16_laion400m_e32: clip_models.append(open_clip.create_model('ViT-B-16', pretrained='laion400m_e32').eval().requires_grad_(False).to(device))\nif RN50_yffcc15m: clip_models.append(open_clip.create_model('RN50', pretrained='yfcc15m').eval().requires_grad_(False).to(device))\nif RN50_cc12m: clip_models.append(open_clip.create_model('RN50', pretrained='cc12m').eval().requires_grad_(False).to(device))\nif RN50_quickgelu_yfcc15m: clip_models.append(open_clip.create_model('RN50-quickgelu', pretrained='yfcc15m').eval().requires_grad_(False).to(device))\nif RN50_quickgelu_cc12m: clip_models.append(open_clip.create_model('RN50-quickgelu', pretrained='cc12m').eval().requires_grad_(False).to(device))\nif RN101_yfcc15m: clip_models.append(open_clip.create_model('RN101', pretrained='yfcc15m').eval().requires_grad_(False).to(device))\nif RN101_quickgelu_yfcc15m: 
clip_models.append(open_clip.create_model('RN101-quickgelu', pretrained='yfcc15m').eval().requires_grad_(False).to(device))\n\nnormalize = T.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711])\nlpips_model = lpips.LPIPS(net='vgg').to(device)","repo_name":"mnm458/ai-disco-diffusion-files","sub_path":"Test1/clipSetting.py","file_name":"clipSetting.py","file_ext":"py","file_size_in_byte":13553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3587149098","text":"from psutil import net_io_counters, disk_io_counters, disk_usage, \\\n    virtual_memory, cpu_percent, process_iter\n\n\ndef total_perf():\n    \"\"\"\n    Record overall machine performance data\n    :return:\n    \"\"\"\n    return (cpu_percent(),\n            virtual_memory().percent,\n            disk_usage('/').percent)\n    # Reserved for future use\n    # disk_io_counters().read_count,\n    # disk_io_counters().write_count,\n    # net_io_counters().packets_sent,\n    # net_io_counters().packets_recv))\n\n\ndef proc_perf(pname):\n    \"\"\"\n    Record performance data for a single process\n    :param pname:\n    :return:\n    \"\"\"\n    target = [p for p in process_iter(attrs=['pid', 'name']) if\n              pname.lower() in p.info['name'].lower()]\n    result = []\n    for p in target:\n        with p.oneshot():\n            result.append((p.ppid(), p.username(), p.cpu_percent(),\n                           p.memory_percent(), p.name()))\n    return result\n","repo_name":"Vancheung/TestEngineering","sub_path":"_1_Programming/Python/Samaritan/src/slave/perf.py","file_name":"perf.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
+{"seq_id":"30275303620","text":"from datadog.dogstatsd import DogStatsd\n\nimport os\nimport time\nimport re\nimport urlparse\nfrom os.path import splitext\n\n\nclass StatsdMiddleware(object):\n    def __init__(self, app, statsd_host='localhost', statsd_port='8125',\n                 statsd_prefix='openstack', statsd_replace='id'):\n        self.app = app\n        self.replace_strategy = _ReplaceStrategy(os.getenv('STATSD_REPLACE', statsd_replace))\n        self.client = DogStatsd(\n            host=os.getenv('STATSD_HOST', statsd_host),\n            port=int(os.getenv('STATSD_PORT', statsd_port)),\n            namespace=os.getenv('STATSD_PREFIX', statsd_prefix)\n        )\n\n    @classmethod\n    def factory(cls, global_config, **local_config):\n        def _factory(app):\n            return cls(app, **local_config)\n\n        return _factory\n\n    def process_response(self, start, environ, response_wrapper,\n                         exception=None):\n        self.client.increment('responses_total')\n\n        status = response_wrapper.get('status')\n        if status:\n            status_code = status.split()[0]\n        else:\n            status_code = 'none'\n\n        method = environ['REQUEST_METHOD']\n\n        # cleanse request path\n        path = urlparse.urlparse(environ['SCRIPT_NAME'] +\n                                 environ['PATH_INFO']).path\n        # strip extensions\n        path = splitext(path)[0]\n\n        # replace parts of the path with constants based on strategy\n        path = self.replace_strategy.apply(path)\n\n        parts = path.rstrip('\\/').split('/')\n        if exception:\n            parts.append(exception.__class__.__name__)\n        api = '/'.join(parts)\n\n        self.client.timing('latency_by_api',\n                           time.time() - start,\n                           tags=[\n                               'method:%s' % method,\n                               'api:%s' % api\n                           ])\n\n        self.client.increment('responses_by_api',\n                              tags=[\n                                  'method:%s' % method,\n                                  'api:%s' % api,\n                                  'status:%s' % status_code\n                              ])\n\n    def __call__(self, environ, start_response):\n        response_interception = {}\n\n        def start_response_wrapper(status, response_headers,\n                                   exc_info=None):\n            response_interception.update(status=status,\n                                         response_headers=response_headers,\n                                         exc_info=exc_info)\n            return start_response(status,
response_headers, exc_info)\n\n        start = time.time()\n        try:\n            self.client.open_buffer()\n            self.client.increment('requests_total')\n\n            response = self.app(environ, start_response_wrapper)\n            try:\n                for event in response:\n                    yield event\n            finally:\n                if hasattr(response, 'close'):\n                    response.close()\n\n            self.process_response(start, environ, response_interception)\n        except Exception as exception:\n            self.process_response(start, environ, response_interception,\n                                  exception)\n            raise\n        finally:\n            self.client.close_buffer()\n\n\nclass _ReplaceStrategy(object):\n    def __init__(self, strategies='id'):\n        self._strategies = []\n\n        # Expecting strings from config like 'id, swift' or 'swift' or 'Id,sWIFT'\n        strategies = strategies.lower().replace(' ', '')\n\n        for strategy in strategies.split(','):\n            if strategy == 'id':\n                self._strategies.append(_ReplaceStrategyId())\n            elif strategy == 'swift':\n                self._strategies.append(_ReplaceStrategySwift())\n\n    def apply(self, path):\n        # Iterate over replacement strategies and apply the substitutions\n        for strategy in self._strategies:\n            path = strategy.replace(path)\n        return path\n\n\nclass _ReplaceStrategyId(object):\n    def __init__(self):\n        self._regex = re.compile(r'/[0-9a-fA-F-]+/')\n\n    def replace(self, path):\n        # replace identifiers with constant\n        return self._regex.sub('/id/', path + '/')\n\n\nclass _ReplaceStrategySwift(object):\n    def __init__(self):\n        self._regex = re.compile(r'(\\S+AUTH_)([p\\-]?[0-9a-fA-F-]+/?)([^ /\\t\\n\\r\\f\\v]+/?)?(\\S*)?')\n\n    def replace(self, path):\n        # Transform Swift path from\n        # /v1/AUTH_0123456789/container-name/pseudo-folder/object-name\n        # to constants\n        # /v1/AUTH_account/container/object\n        m = self._regex.match(path)\n        if m:\n            # Replace account id with constant\n            path = m.group(1) + 'account'\n            if m.group(3) and m.group(3) != '':\n                # Replace container name with constant\n                path += '/container'\n            if m.group(4) and m.group(4) != '':\n                # Replace object name with constant\n                path += '/object'\n            path += '/'\n\n        return path\n","repo_name":"sapcc/openstack-ops-middleware","sub_path":"opsmiddleware/metrics/statsd.py","file_name":"statsd.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"8734582546","text":"#problem 1079 / letter tile possibilities\nimport collections\n\nclass Solution(object):\n    def numTilePossibilities(self, tiles):\n        \"\"\"\n        :type tiles: str\n        :rtype: int\n        \"\"\"\n        res = {''}\n        for t in tiles:\n            res |= {d[:i] + t + d[i:] for d in res for i in range(len(d) + 1)}\n        return len(res)-1\n\n#recursion\nclass Solution:\n    def numTilePossibilities(self, tiles: str) -> int:\n        if not tiles or len(tiles) == 1:\n            return len(tiles)\n\n        H = collections.Counter(tiles)\n        count = 0\n        for remaining in range(1, len(tiles) + 1):\n            tmp_res = self.helper(H, remaining)\n            count += tmp_res\n        return count\n\n    def helper(self, H, remaining):\n        if remaining == 0:\n            return 1\n\n        count = 0\n        for key in H.keys():\n            if H[key] <= 0:\n                continue\n            H[key] -= 1\n            count += self.helper(H, remaining - 1)\n            H[key] += 1\n        return count","repo_name":"digitalladder/leetcode","sub_path":"problem1079.py","file_name":"problem1079.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"8868116835","text":"'''\nCreated on 15 Dec.
2021\n\n@author: slinux\n'''\n\nfrom .wxRavenGeneralDesign import *\n\n\nimport wx.lib as lib\nimport wx.lib.mixins.listctrl as listmix\n\n\n\n\nimport wx.aui\n\n\n\nimport wx.lib.mixins.listctrl as listmix \n\n\nclass RavenErrorLogConsole(wxRavenErrorLogConsolePanel, listmix.ColumnSorterMixin):\n '''\n classdocs\n '''\n\n\n view_base_name = \"Error Log Console\"\n view_name = \"Error Log Console\"\n parent_frame = None\n default_position = \"mgr\"\n \n \n #icon = wx.Bitmap( u\"res/default_style/normal/error_log.png\", wx.BITMAP_TYPE_ANY )\n icon = \"error_console\"\n \n \n allIcons = {}\n \n message_type_mapping = {}\n \n \n\n def __init__(self, parentFrame, position = \"toolbox1\", viewName= \"Error Log Console\"):\n '''\n Constructor\n '''\n super().__init__(parent=parentFrame)\n self.view_base_name = \"Error Log Console\"\n self.view_name = viewName\n self.parent_frame = parentFrame\n self.default_position = position\n \n self._display = [] \n #self._msg = True\n #self._warning = True\n #self._error = True\n self._DebugWindow = None\n \n self.itemDataMap = {}\n self.allIcons = { }\n self.message_type_mapping = {}\n self._listInit = False\n \n self.InitBasicMapping()\n self.InitPlugingAndVariousMapping()\n \n self.InitConsoleLog()\n self.InitToolbar()\n \n parentFrame.RessourcesProvider.ApplyThemeOnPanel(self)\n parentFrame.Add(self, self.view_name ,position, parentFrame.RessourcesProvider.GetImage(self.icon))\n \n #parentFrame.Bind( wx.aui.EVT_AUI_PANE_RESTORE, self.OnAuiPaneRestore )\n #parentFrame.Bind( wx.aui.EVT_AUI_PANE_ACTIVATED, self.OnAuiPaneActivated )\n #parentFrame.Bind( wx.aui.EVT_AUI_RENDER, self.OnAuiPaneRender )\n #parentFrame.Bind( wx.aui.EVT_AUI_PANE_CLOSE, self.OnAuiPaneClose )\n \n self._logCurrentCursor = -1\n \n #self.dummyTest()\n \n self.SetAutoLayout(True)\n \n \n #self.m_auiToolBar1.ToggleTool(self.m_showWarnings.GetId(), True)\n \n \"\"\" \n def OnAuiPaneClose(self, evt):\n print(\"OnAuiPaneClose in console log\")\n \n def OnAuiPaneRestore(self, evt):\n print(\"OnAuiPaneRestore in console log\")\n \n def OnAuiPaneRender(self, evt):\n print(\"OnAuiPaneRender in console log\") \n \n def OnAuiPaneActivated(self, evt):\n print(\"OnAuiPaneActivated in console log\") \n \n \"\"\"\n \n \n \n def InitBasicMapping(self):\n self.message_type_mapping['info'] = 'info'\n \n self.message_type_mapping['message'] = 'msg'\n self.message_type_mapping['msg'] = 'msg'\n \n self.message_type_mapping['warning'] = 'warning'\n \n self.message_type_mapping['error'] = 'error'\n \n self.message_type_mapping['debug'] = 'debug'\n \n \n \n def InitPlugingAndVariousMapping(self):\n self.message_type_mapping['dbsync_inprogress'] = 'info'\n self.message_type_mapping['db'] = 'info'\n self.message_type_mapping['dbsync_done'] = 'msg'\n self.message_type_mapping['db_warning'] = 'warning'\n self.message_type_mapping['db_check'] = 'info'\n \n \n self.message_type_mapping['process_run'] = 'msg'\n self.message_type_mapping['process_stop'] = 'msg'\n self.message_type_mapping['process_pause'] = 'msg'\n self.message_type_mapping['process_warning'] = 'warning'\n\n \n \n \n def __getMessageTypeFromMapping__(self, iconame):\n retType = 'info'\n \n if self.message_type_mapping.__contains__(iconame):\n retType = self.message_type_mapping[iconame]\n #print(\"MAPPING FOUND !\")\n \n \n return retType\n \n \n def ClearLogs(self):\n self.m_listCtrl1.Freeze()\n self.m_listCtrl1.DeleteAllItems() \n self.m_listCtrl1.Thaw()\n \n \n def ResetCursorAndCache(self):\n self._logCurrentCursor = 0\n self.itemDataMap = {}\n 
\n \n \n '''\n \n Virtualization Update\n \n '''\n def UpdateList(self, evt=None): \n self.itemDataMap = {}\n self.cursor = 0\n \n \n #\n # TO DO : GENERATE THE LIST\n #\n \n \n \n self.m_listCtrl1.SetItemCount(len(self.itemDataMap))\n self.m_listCtrl1.Refresh()\n \n \n \n def OnGetItemText(self, item, col):\n #return \"Item %d, column %d\" % (item, col)\n #print(f'OnGetItemText {item} {col}')\n return self.itemDataMap[item][col]\n\n def OnGetItemImage(self, item):\n _datas = self._datacache[item]\n _icon = self.allIcons['info']\n if self.allIcons.__contains__(_datas[0].lower()):\n _icon= self.allIcons[_datas[0].lower()]\n return _icon\n\n\n def OnGetItemAttr(self, item):\n return None\n \n \n \n \n \n def UpdateView(self):\n \n \n #print(\"RavenErrorLogConsole Update !\")\n try:\n _allLogs = self.parent_frame.GetPluginData(\"General\",\"allLogs\").copy()\n \n self.m_listCtrl1.Freeze()\n \n _currentViewCursor = self._logCurrentCursor\n #self.itemDataMap = {}\n items = _allLogs.items()\n for key, data in items:\n #print(str(key) + \" VS\" + str(_currentViewCursor) )\n if key>_currentViewCursor:\n #print(\"newrow\")\n \n \n _icon = self.allIcons['info']\n \n \n _type=self.__getMessageTypeFromMapping__(data[0])\n #print(f\"{data[0]} == {_type}\")\n #print(_type)\n #print(self._display) \n \n _foundInFilter=False\n \n if _type in self._display:\n _foundInFilter = True\n \"\"\"\n for _t in self._display:\n print(f\"cehck display {_t}\") \n if _type.__contains__(_t):\n \n _foundInFilter=True\n \"\"\"\n \n \n #if not _foundInFilter:\n #pass\n #print(f\"not found\") \n #continue\n \n \n \n if self.allIcons.__contains__(data[0].lower()):\n _icon= self.allIcons[data[0].lower()]\n \n \n \n if _foundInFilter:\n \n index = self.m_listCtrl1.InsertItem(self.m_listCtrl1.GetItemCount(), data[1], _icon )\n \n #item = self.m_listCtrl1.GetItem(index)\n #item.SetColumn(1)\n #item.SetText('John')\n self.m_listCtrl1.SetItem(index,1, data[2])\n self.m_listCtrl1.SetItem(index,2, data[3])\n #item1.SetColumn(1)\n #self.m_listCtrl1.SetItem(item)\n self.m_listCtrl1.SetItemData(index, _currentViewCursor)\n \n self.itemDataMap[_currentViewCursor] = (str(data[1]), str(data[2]), str(data[3]) )\n \n \n _currentViewCursor = _currentViewCursor +1\n \n self._logCurrentCursor = _currentViewCursor\n \n \n self.m_listCtrl1.SetColumnWidth(0, wx.LIST_AUTOSIZE)\n self.m_listCtrl1.SetColumnWidth(1, 300)\n self.m_listCtrl1.SetColumnWidth(2, wx.LIST_AUTOSIZE)\n \n if not self._listInit:\n listmix.ColumnSorterMixin.__init__(self, 3)\n self._listInit = True\n \n listmix.ColumnSorterMixin.SortListItems(self, col=2, ascending=0)\n \n self.m_listCtrl1.Thaw()\n \n \n self.RefreshToolbarState()\n \n \n \n self.SetAutoLayout(True)\n self.Layout()\n except Exception as e:\n pass\n \n \n \n def dummyTest(self):\n \n \n fakelog = {\n 1 : [\"error\",\"error in bfdsdf\", \"ffrgfdg\", \"12.00PM\"],\n 2 : [\"info\",\"DNA featudsfsdring Suzanne Vega\", \"Tom's Diner\", \"12.00PM\"],\n }\n\n print(self.m_listCtrl1.GetColumnCount())\n #self.m_listCtrl1.GetColumnIndexFromOrder(1)\n\n items = fakelog.items()\n for key, data in items:\n index = self.m_listCtrl1.InsertItem(self.m_listCtrl1.GetItemCount(), data[1],self.allIcons[data[0]] )\n \n #item = self.m_listCtrl1.GetItem(index)\n #item.SetColumn(1)\n #item.SetText('John')\n self.m_listCtrl1.SetItem(index,1, data[2])\n self.m_listCtrl1.SetItem(index,2, data[3])\n #item1.SetColumn(1)\n #self.m_listCtrl1.SetItem(item)\n self.m_listCtrl1.SetItemData(index, key)\n \n \n \n #items = musicdata.items()\n #for 
key, data in items:\n # index = self.m_dataViewCtrl1.InsertItem(self.m_dataViewCtrl1.GetItemCount(), data)\n #self.list.SetItem(index, 1, data[1])\n #self.list.SetItem(index, 2, data[2])\n # self.list.SetItemData(index, key)\n \n \n # Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py\n def GetListCtrl(self):\n return self.m_listCtrl1\n \n # Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py\n def GetSortImages(self):\n return (self.allIcons['alphab_down'], self.allIcons['alphab_up']) \n \n \n \n \n \n def InitToolbar(self):\n \n myPlugin = self.parent_frame.GetPlugin(\"General\")\n \n alloptions = myPlugin.PLUGIN_SETTINGS['showerror']\n \n if alloptions.__contains__('debug'):\n self.m_auiToolBar1.ToggleTool(self.m_showDebug.GetId(), True)\n \n if alloptions.__contains__('info'):\n self.m_auiToolBar1.ToggleTool(self.m_showInfos.GetId(), True)\n \n if alloptions.__contains__('message') or alloptions.__contains__('msg') :\n self.m_auiToolBar1.ToggleTool(self.m_showMessages.GetId(), True)\n \n if alloptions.__contains__('warning'):\n self.m_auiToolBar1.ToggleTool(self.m_showWarnings.GetId(), True)\n \n if alloptions.__contains__('error'):\n self.m_auiToolBar1.ToggleTool(self.m_showErrors.GetId(), True)\n \n self._display = alloptions\n \n \n \n \n def RefreshToolbarState(self):\n #print(self._DebugWindow)\n \n _iv = self.parent_frame.Views.isViewVisible(\"Debug\")\n #print(f\"iv = {_iv}\")\n if not _iv:\n self.m_auiToolBar1.ToggleTool(self.m_showDebug.GetId(), False)\n \n def ActivateDebugWindow(self):\n print(\"debug console turned on !\")\n #if self._DebugWindow == None:\n # print(\"debug mode was not init yet!\")\n #self._DebugWindow = self.parent_frame.Views.OpenView(\"Debug\", \"General\", True)['instance']\n self.parent_frame.Views.OpenView(\"Debug\", \"General\", True)\n #Debug\n #self._DebugWindow = wx.LogWindow(self.parent_frame, \"Debug\", show=False)\n #self.parent_frame.Add(self._DebugWindow.GetFrame(), \"Debug\", icon=self.parent_frame.RessourcesProvider.GetImage('debug_exc'))\n #self._DebugWindow.Show(show=True)\n \n def OnViewOptionsChanged(self, evt):\n #GetToolToggled\n myPlugin = self.parent_frame.GetPlugin(\"General\")\n alloptions = []\n \n if self.m_auiToolBar1.GetToolToggled(self.m_showDebug.GetId()):\n alloptions.append('debug')\n self.ActivateDebugWindow()\n \n if self.m_auiToolBar1.GetToolToggled(self.m_showInfos.GetId()):\n alloptions.append('info')\n \n \n if self.m_auiToolBar1.GetToolToggled(self.m_showMessages.GetId()):\n alloptions.append('msg')\n \n \n if self.m_auiToolBar1.GetToolToggled(self.m_showWarnings.GetId()):\n alloptions.append('warning')\n \n \n if self.m_auiToolBar1.GetToolToggled(self.m_showErrors.GetId()):\n alloptions.append('error')\n \n \n self._display = alloptions\n \n self.ResetCursorAndCache()\n self.ClearLogs()\n self.UpdateView()\n \n myPlugin.PLUGIN_SETTINGS['showerror'] = alloptions\n \n \n \n def InitConsoleLog(self):\n \n info = wx.ListItem()\n info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT\n info.Image = -1\n info.Align = 0\n info.Text = \"Message\"\n self.m_listCtrl1.InsertColumn(0, info)\n\n info.Align = 0#wx.LIST_FORMAT_RIGHT\n info.Text = \"Source\"\n self.srcCol = self.m_listCtrl1.InsertColumn(1, info)\n\n info.Align = wx.LIST_FORMAT_RIGHT\n info.Text = \"Date\"\n self.m_listCtrl1.InsertColumn(2, info)\n \n \n \n \n \n self.m_listCtrl1.SetColumnWidth(0, 200)\n self.m_listCtrl1.SetColumnWidth(1, 100)\n self.m_listCtrl1.SetColumnWidth(2, 50)\n \n \n self.il = wx.ImageList(16, 16)\n \n 
\"\"\"\n self.allIcons['error'] = self.il.Add(wx.Bitmap( u\"res/default_style/normal/error_tsk.png\", wx.BITMAP_TYPE_ANY ))\n self.allIcons['info'] = self.il.Add(wx.Bitmap( u\"res/default_style/normal/info_obj.png\", wx.BITMAP_TYPE_ANY ))\n self.allIcons['msg'] = self.il.Add(wx.Bitmap( u\"res/default_style/normal/help_view.png\", wx.BITMAP_TYPE_ANY ))\n self.allIcons['warning'] = self.il.Add(wx.Bitmap( u\"res/default_style/normal/warning_obj.png\", wx.BITMAP_TYPE_ANY ))\n \"\"\"\n \n self.allIcons['debug'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('debug_exc') )\n self.allIcons['error'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('error_tsk') )\n self.allIcons['info'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('info_obj') )\n self.allIcons['msg'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('help_view') )\n self.allIcons['warning'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('warning_obj') )\n \n self.allIcons['dbsync_inprogress'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('repository-synchronize') )\n self.allIcons['db'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('repository-blue') )\n self.allIcons['dbsync_done'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('repository_sync_done') )\n self.allIcons['db_warning'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('repository_warning') )\n self.allIcons['db_check'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('repository_check') )\n \n \n self.allIcons['process_run'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('process_run') )\n self.allIcons['process_stop'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('process_stop') )\n self.allIcons['process_pause'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('process_pause') )\n self.allIcons['process_warning'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('process_pause') )\n \n self.allIcons['alphab_up'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('alphab_sort_up') )\n self.allIcons['alphab_down'] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage('alphab_sort_co') )\n \n \n \n self.m_listCtrl1.SetImageList(self.il, wx.IMAGE_LIST_SMALL)\n \n \n #listmix.ListCtrlAutoWidthMixin.__init__(self.m_listCtrl1)\n \n \n #self.setResizeColumn(0)\n #self.bSizer1.Add(self.list, 1, wx.EXPAND)\n \n \n def AddImageInConsole(self, imgname,iconname,logtype=\"info\" ): \n self.allIcons[iconname] = self.il.Add( self.parent_frame.RessourcesProvider.GetImage(imgname) )\n self.message_type_mapping['iconname'] = logtype\n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"sLiinuX/wxRaven","sub_path":"plugins/General/wxRavenErrorLogConsoleLogic.py","file_name":"wxRavenErrorLogConsoleLogic.py","file_ext":"py","file_size_in_byte":16853,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"13994412084","text":"def remove_last(array):\n if len(array) > 0:\n array.pop()\n\n\nentry = input()\nnumbers = []\nwhile entry != 'end':\n if entry.isdigit():\n numbers.append(int(entry))\n elif entry == 'M':\n print(*numbers, sep='')\n elif entry == 'D':\n remove_last(numbers)\n elif len(entry) > 1:\n command = entry.split()\n action = command[0]\n position = int(command[1])\n if position <= len(numbers):\n if position == 1:\n remove_last(numbers)\n elif action == 'B':\n numbers.pop(position * -1)\n elif action == 'C':\n numbers 
= numbers[0:(position * -1):1]\n    entry = input()\n","repo_name":"o-andres-u/EstructurasDatos","sub_path":"modulo_5/ejercicio_3.py","file_name":"ejercicio_3.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"26256104585","text":"def first(a):\r\n    if type(a) == list or type(a) == tuple:\r\n        return first(a[0])\r\n    return a\r\ndef last(a):\r\n    if type(a) == list or type(a) == tuple:\r\n        return last(a[-1])\r\n    return a\r\ndef extract(a, reverse = False):\r\n    if reverse:\r\n        return last(a)\r\n    else:\r\n        return first(a)\r\ndef max_num(lista, reverse = False):\r\n    maior = lista[0]\r\n    for num in lista:\r\n        if extract(num, reverse) > extract(maior, reverse):\r\n            maior = num\r\n    return maior\r\ndef min_num(lista, reverse = False):\r\n    menor = lista[0]\r\n    for num in lista:\r\n        if extract(num, reverse) < extract(menor, reverse):\r\n            menor = num\r\n    return menor\r\ndef sort_last(lista, reverse = False):\r\n    maior = max_num(lista, reverse)\r\n    antigo_menor = extract(min_num(lista, reverse), reverse) - 1\r\n    lista_ordenada = []\r\n    for i in lista:\r\n        menor = maior\r\n        for j in lista:\r\n            if extract(j, reverse) < extract(menor, reverse) and extract(j, reverse) > extract(antigo_menor, reverse):\r\n                menor = j\r\n        lista_ordenada.append(menor)\r\n        antigo_menor = menor\r\n\r\n    return lista_ordenada","repo_name":"ryukinix/pyzumbi-amazon","sub_path":"exercícios/Lista XIII/max_min_sort.py","file_name":"max_min_sort.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"33366759216","text":"import random\nn = 100\nnumbers = list(range(1,n+1))\nx = numbers.pop(random.randint(0,n-1))\nprint(x)\n# find x\n\n# 1st way\n# for i in range(1,n+1):\n#     if i not in numbers:\n#         print(i)\n#         break\n\n\n# 2nd way\ntotal = n * (n+1) // 2\nprint(total - sum(numbers))","repo_name":"melibayev/Python_lessons","sub_path":"Python_program/FindMissingNumber.py","file_name":"FindMissingNumber.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"29737814719","text":"\"\"\" flask api tools and workout app variables \"\"\"\nfrom flask import request, jsonify\nfrom workout import app, db\nfrom workout.model.workout_model import WorkoutModel\n\n\n@app.route(\"/api/workouts\", methods=['POST', 'GET'])\ndef route_workouts():\n    \"\"\" workout api CRUD operations \"\"\"\n    if request.method == 'GET':\n        return \"Getting all workouts\"\n\n    if request.method == 'POST':\n        name = request.form['name']\n        description = request.form['description']\n        # creating workout object\n        workout = WorkoutModel(name=name, description=description)\n\n        # save on database\n        db.session.add(workout)\n        db.session.commit()\n\n        return f'workout {name} was created'\n\n    return \"This route does not exist\"\n\n\n@app.route(\"/api/workout/<int:workout_id>\", methods=['POST', 'GET', 'DELETE', 'PUT'])\ndef route_workout_id(workout_id):\n    \"\"\" specific workout with ID api CRUD operations \"\"\"\n    if request.method == 'GET':\n        data = {}\n        result = None\n\n        try:\n            result = db.session.execute(\n                db.select(WorkoutModel).filter_by(id=workout_id)).one()\n        except:\n            return \"workout not found\"\n\n        for workout in result:\n            data = {\"workoutName\": workout.name,\n                    \"description\": workout.description}\n\n        return jsonify(data)\n\n    if request.method == 'DELETE':\n        result = None\n\n        try:\n            result =
db.session.execute(\n db.select(WorkoutModel).filter_by(id=workout_id)).one()\n except:\n return \"workout was not found\"\n\n for workout in result:\n db.session.delete(workout)\n db.session.commit()\n\n return \"deleting a workout\"\n\n if request.method == 'PUT':\n data_list = request.json\n\n name = None\n result = None\n\n try:\n result = db.session.execute(\n db.select(WorkoutModel).filter_by(id=workout_id)).one()\n except:\n return \"workout was not found\"\n\n for items in data_list:\n field = items.get(\"field\")\n new_value = items.get(\"newValue\")\n\n for workout in result:\n name = workout.name\n if field == \"name\":\n workout.name = new_value\n elif field == \"description\":\n workout.description = new_value\n\n db.session.commit()\n\n return f'workout {name} was updated'\n\n return \"route does not exist\"\n","repo_name":"Simple-Tech-Services/fitness-api","sub_path":"workout/controller/workouts.py","file_name":"workouts.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"30750723460","text":"# 프로그래머스 - 코딩테스트 연습 > 위클리 챌린지 > 전력망을 둘로 나누기\ndef dfs(wire_dict, root):\n\tvisit, stack = [], []\n\tstack.append(root)\n\twhile stack:\n\t\tnode = stack.pop()\n\t\tif node not in visit:\n\t\t\tvisit.append(node)\n\t\t\tstack.extend(wire_dict[node])\n\n\treturn visit\n\ndef get_nodes_diff(wires, idx):\n\tfrom collections import defaultdict\n\twire_dict = defaultdict(list)\n\tfor wire in (wires[:idx] + wires[idx+1:]):\n\t\twire_dict[wire[0]].append(wire[1])\n\t\twire_dict[wire[1]].append(wire[0])\n\n\tleft_nodes = len(dfs(wire_dict, wires[idx][0]))\n\tright_nodes = len(dfs(wire_dict, wires[idx][1]))\n\n\tif left_nodes > right_nodes:\n\t\treturn left_nodes - right_nodes\n\telse:\n\t\treturn right_nodes - left_nodes\n\n\ndef solution(n, wires):\n\tanswer = len(wires)\n\twires.sort(key = lambda x: x)\n\t\n\tfor idx, wire in enumerate(wires):\n\t\ttmp = get_nodes_diff(wires, idx)\n\t\tif tmp < answer:\n\t\t\tanswer = tmp\n\n\treturn answer","repo_name":"lagunerio/daily-algorithm","sub_path":"programmers/python/20220321-2.py","file_name":"20220321-2.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15028561068","text":"from gurobipy import *\r\nfrom sympy import exp,pi\r\nimport math\r\n\r\ndef phi(x):\r\n # constants\r\n a1 = 0.254829592\r\n a2 = -0.284496736\r\n a3 = 1.421413741\r\n a4 = -1.453152027\r\n a5 = 1.061405429\r\n p = 0.3275911\r\n\r\n # Save the sign of x\r\n sign = 1\r\n if x < 0:\r\n sign = -1\r\n x = abs(x)/math.sqrt(2.0)\r\n\r\n # A&S formula 7.1.26\r\n t = 1.0/(1.0 + p*x)\r\n y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)\r\n\r\n return 0.5*(1.0 + sign*y)\r\n\r\ndef newton(sigma,mean,s):\r\n x={}\r\n fx={}\r\n dfx={}\r\n x[0]=s\r\n ite=3000\r\n for i in range(0,ite):\r\n z=(x[i]-mean)/sigma\r\n a=phi(z)\r\n temp_fx=x[i]-(x[i]-mean)*(a)-(sigma*exp(-0.5*z*z))/((2*pi)**0.5)-s\r\n fx[i]=float(temp_fx)\r\n temp_dfx=1-a\r\n dfx[i]=float(temp_dfx)\r\n x[i+1]=x[i]-fx[i]/dfx[i]\r\n if fx[i]*fx[i]<0.00000000001:\r\n break\r\n return x[len(x)-1]\r\n\r\ndef solve(station_num,fare_type,capacity,fare,mean,sigma):\r\n m = Model('IP')\r\n sold = m.addVars(fare_type, station_num, station_num, lb=0, vtype=GRB.CONTINUOUS, name=\"sold\")\r\n m.addConstrs((quicksum(\r\n sold[f, i, j] for f in range(0, fare_type) for i in range(0, station_num - 1) if i <= leg for j in\r\n 
range(1, station_num) if j >= leg + 1) <= capacity for leg in range(0, station_num - 1)),\r\n name=\"sold_less_than_capacity\")\r\n m.addConstrs((sold[f, i, j] <= mean[f, i, j] for f in range(0, fare_type) for i in range(0, station_num) for j in\r\n range(0, station_num) if i <= j - 1), name=\"sold_less_than_mean\")\r\n m.setObjective((quicksum(\r\n sold[f, i, j] * fare[f, i, j] for f in range(0, fare_type) for i in range(0, station_num) for j in\r\n range(0, station_num) if i <= j - 1)), GRB.MAXIMIZE)\r\n m.optimize()\r\n m.write('IP.lp')\r\n m.write('IP.sol')\r\n solution={}\r\n for i in sold:\r\n if i[1]<=i[2]-1:\r\n solution[i]=sold[i].x\r\n for i in solution:\r\n a=newton(sigma[i],mean[i],solution[i])\r\n solution[i]=a\r\n print(a,solution[i],i)\r\n print(solution)\r\n return solution\r\n\r\n\r\n\r\n\r\n#Newton.newton(3.52,18.15,18.15)\r\n\r\n\r\n\r\n\r\n","repo_name":"deyiaodiao/PSO-of-An-efficient-computational-approach-for-railway-booking-problems","sub_path":"LP_solve.py","file_name":"LP_solve.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"3842354959","text":"import sys\nimport torch\nimport numpy as np\nsys.path.append('..')\nimport json\nfrom collections import defaultdict\nfrom tqdm import tqdm\nimport re\n\nfrom lama.modules import build_model_by_name\nimport lama.options as options\nfrom lama.modules import base_connector\nfrom lama import attacks\n\n\ndef main(args):\n try_cuda = torch.cuda.is_available()\n\n model = build_model_by_name(args.models_names[0], args)\n model.add_hooks()\n embedding_weight = model.get_embedding_weight()\n\n sentences = ['The theory of relativity was developed by Einstein.',\n 'Windows was developed by Microsoft.']\n\n sentences = ['(The theory of relativity) was found by (Einstein.)']\n\n sentences = ['(Barack Obama) was born in (Hawaii.)']\n\n sentences = ['Him (speaks English.)']\n\n sentences = ['[The theory of relativity was] killed [by Einstein].',\n '[Windows was] killed [by Microsoft].']\n\n for _ in range(50):\n for token_to_flip in range(0, 3): # TODO: for each token in the trigger\n # back propagation\n model.zero_grad()\n loss, tokens, _, unbracket_mask = model.get_rc_loss(sentences, try_cuda=try_cuda)\n # SHAPE: (batch_size, seq_len)\n unbracket_mask = unbracket_mask.bool()\n loss.backward()\n print(loss)\n\n # SHAPE: (batch_size, seq_len, emb_dim)\n grad = base_connector.extracted_grads[0]\n bs, _, emb_dim = grad.size()\n base_connector.extracted_grads = []\n\n # TODO\n # SHAPE: (batch_size, unbracket_len, emb_dim)\n #grad = grad.masked_select(F.pad(unbracket_mask, (1, 0), 'constant', False)[:, :-1].unsqueeze(-1)).view(bs, -1, emb_dim)\n grad = grad.masked_select(unbracket_mask.unsqueeze(-1)).view(bs, -1, emb_dim)\n # SHAPE: (1, emb_dim)\n grad = grad.sum(dim=0)[token_to_flip].unsqueeze(0)\n print((grad * grad).sum().sqrt())\n\n # SHAPE: (batch_size, unbracket_len)\n tokens = tokens.masked_select(unbracket_mask).view(bs, -1)\n token_tochange = tokens[0][token_to_flip].item()\n\n # Use hotflip (linear approximation) attack to get the top num_candidates\n candidates = attacks.hotflip_attack(grad, embedding_weight, [token_tochange],\n increase_loss=False, num_candidates=10)[0]\n print(model.tokenizer.convert_ids_to_tokens([token_tochange]), model.tokenizer.convert_ids_to_tokens(candidates))\n input()\n\n\ndef pattern_score(args, pattern_json, output_file):\n try_cuda = torch.cuda.is_available()\n model = 
build_model_by_name(args.models_names[0], args)\n with open(pattern_json, 'r') as fin:\n pattern_json = json.load(fin)\n\n batch_size = 32\n pid2pattern = defaultdict(lambda: {})\n for pid in tqdm(sorted(pattern_json)):\n #if not pid.startswith('P69_'):\n # continue\n snippets = pattern_json[pid]['snippet']\n occs = pattern_json[pid]['occs']\n for (snippet, direction), count in snippets:\n if len(snippet) <= 5 or len(snippet) >= 100: # longer than 5 chars\n continue\n loss = 0\n num_batch = np.ceil(len(occs) / batch_size)\n for b in range(0, len(occs), batch_size):\n occs_batch = occs[b:b + batch_size]\n sentences = ['{} {} ({})'.format(h, snippet, t)\n if direction == 1 else '{} {} ({})'.format(t, snippet, h) for h, t in occs_batch]\n #print((snippet, direction), count)\n #print(sentences)\n #input()\n loss += model.get_rc_loss(sentences, try_cuda=try_cuda)[0].item()\n pid2pattern[pid][snippet] = loss / num_batch\n #print(pid)\n #print(sorted(pid2pattern[pid].items(), key=lambda x: x[1]))\n #input()\n\n with open(output_file, 'w') as fout:\n for pid, pats in pid2pattern.items():\n pats = sorted(pats.items(), key=lambda x: x[1])\n fout.write('{}\\t{}\\n'.format(pid, json.dumps(pats)))\n\n\ndef fill_cloze(args, input_jsonl, batch_size, beam_size):\n try_cuda = torch.cuda.is_available()\n model = build_model_by_name(args.models_names[0], args)\n with open(input_jsonl, 'r') as fin:\n data = [json.loads(l) for l in fin]\n # only keep qa pairs (1) with uppercase initials (2) <= 200 chars (3) not contain number\n data = [d for d in data if d['answer'][0].isupper() and len(d['sentence']) <= 200 and not bool(re.search(r'\\d', d['sentence']))]\n print('#qa pairs {}'.format(len(data)))\n\n acc_token_li, acc_sent_li = [], []\n for b in tqdm(range(0, len(data), batch_size)):\n data_batch = data[b:b + batch_size]\n sents = []\n for d in data_batch:\n start = d['answer_start']\n end = start + len(d['answer'])\n sent = d['sentence'].replace('[', '(').replace(']', ')')\n sent = sent[:start] + '[' + sent[start:end] + ']' + sent[end:]\n sents.append(sent)\n acc_token, acc_sent = model.fill_cloze(sents, try_cuda=try_cuda, beam_size=beam_size)\n acc_token_li.append(acc_token)\n acc_sent_li.append(acc_sent)\n #print(acc_token, acc_sent)\n print('mean acc_token {}, mean acc_sent {}'.format(np.mean(acc_token_li), np.mean(acc_sent_li)))\n\n\ndef fill_cloze_lama_squad(args, input_jsonl, batch_size, beam_size):\n try_cuda = torch.cuda.is_available()\n model = build_model_by_name(args.models_names[0], args)\n with open(input_jsonl, 'r') as fin:\n data = [json.loads(l) for l in fin]\n print('#qa pairs {}'.format(len(data)))\n\n acc_token_li, acc_sent_li = [], []\n for b in tqdm(range(0, len(data), batch_size)):\n data_batch = data[b:b + batch_size]\n sents = []\n for d in data_batch:\n sents.append(d['masked_sentences'][0].replace('[MASK]', '[{}]'.format(d['obj_label'])))\n acc_token, acc_sent = model.fill_cloze(sents, try_cuda=try_cuda, beam_size=beam_size)\n acc_token_li.append(acc_token)\n acc_sent_li.append(acc_sent)\n #print(acc_token, acc_sent)\n print('mean acc_token {}, mean acc_sent {}'.format(np.mean(acc_token_li), np.mean(acc_sent_li)))\n\n\ndef fill_cloze_webquestion(args, input_file, batch_size, beam_size):\n try_cuda = torch.cuda.is_available()\n model = build_model_by_name(args.models_names[0], args)\n with open(input_file, 'r') as fin:\n # keep statement based on number of words in the answer\n sents = [l.strip() for l in fin]\n sents = [s for s in sents if len(re.split('\\[|\\]', s)[1].split()) == 
1]\n print('#qa pairs {}'.format(len(sents)))\n\n acc_token_li, acc_sent_li = [], []\n for b in tqdm(range(0, len(sents), batch_size)):\n acc_token, acc_sent = model.fill_cloze(sents[b:b + batch_size], try_cuda=try_cuda, beam_size=beam_size)\n acc_token_li.append(acc_token)\n acc_sent_li.append(acc_sent)\n #print(acc_token, acc_sent)\n print('mean acc_token {}, mean acc_sent {}'.format(np.mean(acc_token_li), np.mean(acc_sent_li)))\n\n\ndef refine_cloze(args):\n try_cuda = torch.cuda.is_available()\n model = build_model_by_name(args.models_names[0], args)\n sents = ['The theory of relativity [ is killed by ] Einstein .', 'Windows [ is killed by ] Microsoft .']\n model.refine_cloze(sents, try_cuda=try_cuda)\n\n\nif __name__ == '__main__':\n np.random.seed(0)\n torch.random.manual_seed(0)\n torch.cuda.manual_seed(0)\n\n parser = options.get_rc_parser()\n args = options.parse_args(parser)\n #main(args)\n #pattern_score(args, 'patterns.json', 'output/test.txt')\n #fill_cloze(args, '/home/zhengbaj/data/squad/train-v2.0.jsonl',\n # batch_size=args.batch_size, beam_size=args.beam_size)\n #fill_cloze_lama_squad(args, 'data/Squad/test.jsonl',\n # batch_size=args.batch_size, beam_size=args.beam_size)\n #fill_cloze_webquestion(args, '/home/zhengbaj/data/webquestion/statement.txt',\n # batch_size=args.batch_size, beam_size=args.beam_size)\n refine_cloze(args)\n","repo_name":"AbhilashaRavichander/LAMA","sub_path":"scripts/rc.py","file_name":"rc.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"10918582877","text":"from dataclasses import dataclass\nfrom datetime import datetime\nfrom threading import Lock\nfrom typing import Dict, List, Optional, TYPE_CHECKING, Tuple\n\nfrom requests import ConnectionError\n\nfrom vnpy.api.rest import Request\nfrom vnpy.gateway.oanda.oanda_api_base import OandaApiBase\nfrom vnpy.trader.constant import Direction, Exchange, Interval, Offset, Product, Status\nfrom vnpy.trader.object import AccountData, BarData, CancelRequest, ContractData, OrderRequest, \\\n PositionData\nfrom .oanda_common import (INTERVAL_VT2OANDA, INTERVAL_VT2OANDA_DELTA, ORDER_TYPE_VT2OANDA,\n STATUS_OANDA2VT, local_tz, parse_datetime, parse_time, utc_tz)\n\nif TYPE_CHECKING:\n from vnpy.gateway.oanda import OandaGateway\n_ = lambda x: x # noqa\n\nHOST = \"https://api-fxtrade.oanda.com\"\nTEST_HOST = \"https://api-fxpractice.oanda.com\"\n\n# asked from official developer\nPRICE_TICKS = {\n \"BTCUSD\": 0.5,\n \"ETHUSD\": 0.05,\n \"EOSUSD\": 0.001,\n \"XRPUSD\": 0.0001,\n}\n\n\n@dataclass()\nclass HistoryDataNextInfo:\n symbol: str\n interval: Interval\n end: datetime\n\n\nclass RequestFailedException(Exception):\n pass\n\n\nclass OandaRestApi(OandaApiBase):\n \"\"\"\n Oanda Rest API\n \"\"\"\n\n def __init__(self, gateway: \"OandaGateway\"):\n \"\"\"\"\"\"\n super(OandaRestApi, self).__init__(gateway)\n\n self.order_count = 1_000_000\n self.order_count_lock = Lock()\n\n self.connect_time = 0\n\n self.contracts: Dict[str, ContractData] = {}\n\n self.last_account_transaction_id: Optional[str] = None\n\n # used for automatic tests.\n self.account_initialized = False\n self.orders_initialized = False\n\n def connect(\n self,\n key: str,\n session_number: int,\n server: str,\n proxy_host: str,\n proxy_port: int,\n ):\n \"\"\"\n Initialize connection to REST server.\n \"\"\"\n self.key = key\n\n self.connect_time = (\n int(datetime.now().strftime(\"%y%m%d%H%M%S\")) * self.order_count\n )\n\n if server == 
\"REAL\":\n self.init(HOST, proxy_host, proxy_port)\n else:\n self.init(TEST_HOST, proxy_host, proxy_port)\n\n self.start(session_number)\n\n self.gateway.write_log(_(\"REST API启动成功\"))\n\n def _new_order_id(self) -> str:\n \"\"\"\"\"\"\n with self.order_count_lock:\n self.order_count += 1\n return f'a{self.connect_time}{self.order_count}'\n\n @staticmethod\n def is_local_order_id(order_id: str):\n return order_id[0] == 'a'\n\n def send_order(self, req: OrderRequest):\n \"\"\"\"\"\"\n req.offset = Offset.NONE\n order_id = self._new_order_id()\n account_id = req.account_id\n symbol = req.symbol\n vol = int(req.volume)\n direction = req.direction\n data = {\n \"instrument\": symbol,\n # positive for long , negative for short\n \"units\": vol if direction is Direction.LONG else -vol,\n \"clientExtensions\": {\n \"id\": order_id,\n }\n }\n\n order = req.create_order_data(order_id, self.gateway_name, account_id=account_id)\n order.time = parse_time(datetime.now().isoformat())\n\n # Only add price for limit order.\n data[\"type\"] = ORDER_TYPE_VT2OANDA[req.type]\n data[\"price\"] = str(req.price)\n self.gateway.orders[order.orderid] = order\n self.add_request(\n \"POST\",\n f\"/v3/accounts/{account_id}/orders\",\n callback=self.on_send_order,\n data={'order': data},\n extra=order,\n on_failed=self.on_send_order_failed,\n on_error=self.on_send_order_error,\n )\n self.gateway.on_order(order)\n return order.vt_orderid\n\n def cancel_order(self, req: CancelRequest):\n \"\"\"\"\"\"\n order_id = req.orderid\n account_id = req.account_id\n order = self.gateway.orders[order_id]\n\n if self.is_local_order_id(order_id):\n order_id = '@' + order_id\n self.add_request(\n \"PUT\",\n f\"/v3/accounts/{account_id}/orders/{order_id}/cancel\",\n callback=self.on_cancel_order,\n extra=order,\n )\n\n def query_history(self,\n symbol: str,\n interval: Interval,\n start: datetime,\n limit: int = None,\n ) -> Tuple[List[BarData], \"HistoryDataNextInfo\"]:\n \"\"\"\n Get history data synchronously.\n \"\"\"\n if limit is None:\n limit = self.gateway.HISTORY_RECORD_PER_REQUEST\n\n bars = []\n\n # datetime for a bar is close_time\n # we got open_time from API.\n adjustment = INTERVAL_VT2OANDA_DELTA[interval]\n\n # todo: RestClient: return RestClient.Request object instead of requests.Response.\n resp = self.request(\n \"GET\",\n f\"/v3/instruments/{symbol}/candles\",\n params={\n \"price\": \"MAB\", # M for mids, B for bids, A for asks\n \"granularity\": INTERVAL_VT2OANDA[interval],\n \"count\": 5000,\n \"from\": start.isoformat(),\n }\n )\n\n # Break if request failed with other status code\n raw_data = resp.json()\n # noinspection PyTypeChecker\n if not self.is_request_success(raw_data, None):\n msg = f\"获取历史数据失败,状态码:{resp.status_code},信息:{resp.text}\"\n self.gateway.write_log(msg)\n raise RequestFailedException(msg)\n result = raw_data['candles']\n for data in result:\n bar_data = data['mid']\n open_time = parse_datetime(data[\"time\"])\n # close_time = open_time + adjustment\n close_time = open_time\n bar = BarData(\n symbol=symbol,\n exchange=Exchange.OANDA,\n datetime=close_time,\n interval=interval,\n volume=data[\"volume\"],\n open_price=float(bar_data[\"o\"]),\n high_price=float(bar_data[\"h\"]),\n low_price=float(bar_data[\"l\"]),\n close_price=float(bar_data[\"c\"]),\n close_bid_price=float(data[\"bid\"][\"c\"]),\n close_ask_price=float(data[\"ask\"][\"c\"]),\n gateway_name=self.gateway_name\n )\n bars.append(bar)\n\n end = bars[-1].datetime.replace(tzinfo=utc_tz).astimezone(tz=local_tz)\n return bars, 
HistoryDataNextInfo(symbol, interval, end)\n\n def on_send_order_failed(self, status_code: int, request: Request):\n \"\"\"\n Callback when sending order failed on server.\n \"\"\"\n data = request.response.json()\n\n order = request.extra\n order.status = Status.REJECTED\n self.gateway.on_order(order)\n\n msg = f\"委托失败,错误代码:{data.get('errorCode', '')}, 错误信息:{data['errorMessage']}\"\n self.gateway.write_log(msg)\n\n def on_send_order_error(\n self, exception_type: type, exception_value: Exception, tb, request: Request\n ):\n \"\"\"\n Callback when sending order caused exception.\n \"\"\"\n order = request.extra\n order.status = Status.REJECTED\n self.gateway.on_order(order)\n\n # Record exception if not ConnectionError\n if not issubclass(exception_type, ConnectionError):\n self.on_error(exception_type, exception_value, tb, request)\n\n def on_send_order(self, raw_data: dict, request: Request):\n \"\"\"\"\"\"\n # order: OrderData = request.extra\n # creation = raw_data.get('orderCreateTransaction', None)\n # if creation is not None:\n # order.status = Status.NOTTRADED\n # order.time = creation['time'][11:19]\n # self.gateway.on_order(order)\n\n # cancel = raw_data.get('orderCancelTransaction', None)\n # if cancel is not None:\n # # potential bug: stream API will generate a Status.Cancel Order for this transaction\n # order.status = Status.REJECTED\n # order.time = parse_time(cancel['time'])\n pass\n\n def on_cancel_order(self, raw_data: dict, request: Request):\n \"\"\"\"\"\"\n # order: OrderData = request.extra\n # order.status = Status.CANCELLED\n # order.time = raw_data['orderCancelTransaction']['time'][11:19]\n # self.gateway.on_order(order)\n pass\n\n def on_failed(self, status_code: int, request: Request):\n \"\"\"\n Callback to handle request failed.\n \"\"\"\n data = request.response.json()\n self._handle_error_response(data, request)\n\n def _handle_error_response(self, data, request, operation_name: str = None):\n if operation_name is None:\n operation_name = request.path\n\n # todo: rate limit?\n\n error_msg = data.get(\"message\", None)\n if error_msg is None:\n error_msg = data.get(\"errorMessage\", None)\n msg = f\"请求{operation_name}失败,状态码:{request.status},错误消息:{error_msg}\"\n msg += f'\\n{request}'\n self.gateway.write_log(msg)\n\n def query_positions(self):\n self.add_request(\"GET\",\n f\"/v3/accounts/{self.gateway.account_id}/positions\",\n self.on_query_positions\n )\n\n def query_account_changes(self):\n do_nothing = lambda a, b, c, d: None # noqa\n\n if self.last_account_transaction_id is not None:\n account_id = self.gateway.account_id\n self.add_request(\"GET\",\n f\"/v3/accounts/{account_id}/changes\",\n params={\n \"sinceTransactionID\": self.last_account_transaction_id\n },\n callback=self.on_query_account_changes,\n extra=account_id,\n on_error=do_nothing,\n )\n\n def on_query_account_changes(self, raw_data: dict, request: \"Request\"):\n account_id: str = request.extra\n\n # state: we focus mainly on account balance\n state = raw_data['state']\n NAV = float(state['NAV'])\n pnl = float(state['unrealizedPL'])\n account = AccountData(\n gateway_name=self.gateway_name,\n accountid=account_id,\n balance=NAV - pnl,\n frozen=0, # no frozen\n )\n self.gateway.on_account(account)\n\n # changes: we focus mainly on position changes\n changes = raw_data['changes']\n positions = changes['positions']\n unrealized_pnls: Dict[(str, Direction), float] = {}\n\n # pnl in query_account_changes is different from data returned by query_account\n # we have to get pnl from 'state' 
record.\n for pos_state_data in state['positions']:\n symbol = pos_state_data['instrument']\n unrealized_pnls[(symbol, Direction.LONG)] = float(pos_state_data['longUnrealizedPL'])\n unrealized_pnls[(symbol, Direction.SHORT)] = float(pos_state_data['shortUnrealizedPL'])\n\n for pos_data in positions:\n pos_long, pos_short = self.parse_position_data(pos_data)\n symbol = pos_long.symbol\n pos_long.pnl = unrealized_pnls[(symbol, Direction.LONG)]\n pos_short.pnl = unrealized_pnls[(symbol, Direction.SHORT)]\n self.gateway.on_position(pos_long)\n self.gateway.on_position(pos_short)\n self.last_account_transaction_id = raw_data['lastTransactionID']\n\n def on_query_positions(self, raw_data: dict, request: \"Request\"):\n for pos_data in raw_data['positions']:\n pos_long, pos_short = self.parse_position_data(pos_data)\n self.gateway.on_position(pos_long)\n self.gateway.on_position(pos_short)\n\n def parse_position_data(self, pos_data) -> Tuple[PositionData, PositionData]:\n symbol = pos_data['instrument']\n pos_long, pos_short = [\n PositionData(\n gateway_name=self.gateway_name,\n symbol=symbol,\n exchange=Exchange.OANDA,\n direction=direction,\n volume=int(data['units']),\n price=float(data.get('averagePrice', 0.0)),\n pnl=float(data.get('unrealizedPL', 0.0)),\n )\n for direction, data in (\n (Direction.LONG, pos_data['long']),\n (Direction.SHORT, pos_data['short'])\n )\n ]\n return pos_long, pos_short\n\n def query_orders(self):\n \"\"\"\n query all orders, including stop orders\n \"\"\"\n self.add_request(\"GET\",\n f\"/v3/accounts/{self.gateway.account_id}/orders?state=ALL\",\n callback=self.on_query_orders,\n )\n\n def on_query_orders(self, raw_data: dict, request: \"Request\"):\n for data in raw_data['orders']:\n order = self.gateway.parse_order_data(data,\n STATUS_OANDA2VT[data['state']],\n 'createTime')\n self.gateway.on_order(order)\n self.orders_initialized = True\n\n def query_accounts(self):\n self.add_request(\"GET\",\n \"/v3/accounts\",\n callback=self.on_query_accounts,\n )\n\n def on_query_accounts(self, raw_data: dict, request: \"Request\"):\n \"\"\"\n {\"accounts\":[{\"id\":\"101-001-12185735-001\",\"tags\":[]}]}\n \"\"\"\n if not self.gateway.account_id:\n for acc in raw_data['accounts']:\n account_id: str = acc['id']\n self.gateway.account_id = account_id\n\n self.query_account(account_id)\n self.query_contracts(account_id)\n # self.query_orders()\n # self.query_positions()\n if self.gateway.account_id:\n self.query_account(account_id)\n self.query_contracts(account_id)\n\n def query_account(self, account_id: str):\n self.add_request(\"GET\",\n f\"/v3/accounts/{account_id}\",\n callback=self.on_query_account,\n )\n\n def on_query_account(self, raw_data: dict, request: \"Request\"):\n acc = raw_data['account']\n account_data = AccountData(\n gateway_name=self.gateway_name,\n accountid=acc['id'],\n balance=float(acc['balance']),\n frozen=0, # no frozen\n )\n self.gateway.on_account(account_data)\n for data in acc['orders']:\n # order = self.parse_order_data(data)\n # self.gateway.on_order(order)\n pass\n for pos_data in acc['positions']:\n pos_long, pos_short = self.parse_position_data(pos_data)\n self.gateway.on_position(pos_long)\n self.gateway.on_position(pos_short)\n for trade in acc['trades']:\n pass\n self.last_account_transaction_id = acc['lastTransactionID']\n # self.gateway.stream_api.subscribe_transaction()\n self.account_initialized = True\n\n @property\n def fully_initialized(self):\n return self.account_initialized and self.orders_initialized\n\n def 
query_contracts(self, account_id):\n self.add_request(\"GET\",\n f\"/v3/accounts/{account_id}/instruments\",\n self.on_query_contracts)\n\n def on_query_contracts(self, data: dict, request: \"Request\"):\n for ins in data['instruments']:\n symbol = ins['name']\n contract = ContractData(\n gateway_name=self.gateway_name,\n symbol=symbol,\n exchange=Exchange.OANDA,\n name=symbol,\n product=Product.FOREX,\n size=1,\n history_data=True,\n pricetick=1.0 / pow(10, ins['displayPrecision']),\n )\n self.gateway.on_contract(contract)\n self.contracts[symbol] = contract\n self.gateway.write_log(_(\"合约信息查询成功\"))\n","repo_name":"maxsonic/vnpy_oanda","sub_path":"vnpy/gateway/oanda/oanda_rest_api.py","file_name":"oanda_rest_api.py","file_ext":"py","file_size_in_byte":16295,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"1117500542","text":"# asks for player two's word to be guessed and returns it\ndef request_word():\n while True:\n line = input(\"Pick a word for the other player to try to guess: \")\n i = 0\n for c in line:\n if (c.isalpha()):\n i = i + 1\n if (i > 3):\n return line\n else:\n print(\"There must be at least 3 letters in your word! Type a new word.\")\n\n# asks whether or not it is okay to display the word to confirm it was typed correctly and returns yes if it is\ndef okay_to_confirm():\n while True:\n line = input(\"Would you like to confirm that your word was typed correctly? (yes/no) \")\n if (line.strip() == \"yes\"):\n return True\n if (line.strip() == \"no\"):\n return False\n print(\"You must answer with either \\\"yes\\\" or \\\"no\\\".\")\n\n# asks whether the word shown is correct or not and returns yes if it is correct\ndef ask_for_confirmation():\n while True:\n line = input(\"Is the word you see the word you typed? (yes/no) \")\n if (line.strip() == \"yes\"):\n return True\n if (line.strip() == \"no\"):\n return False\n print(\"You must answer with either \\\"yes\\\" or \\\"no\\\".\")\n\nletters_already_guessed = []\n# asks for a letter guess and returns it\ndef ask_for_guess():\n while True:\n line = input(\"Enter a letter to guess: \")\n letter = line.strip().upper()\n if (len(letter) != 1):\n print (\"Enter exactly one letter.\")\n continue\n if (letter in letters_already_guessed):\n print (\"You've already guessed that letter! Come up with a different one.\")\n continue\n if (not letter.isalpha()):\n print (\"That's not a letter! Enter one letter, from A to Z.\")\n continue\n letters_already_guessed.append(letter)\n return letter\n\n# tells the first player (s)he won\ndef player_one_wins():\n print (\"Player 1 wins! Stellar deductions.\")\n\n# tells the second player (s)he won\ndef player_two_wins():\n print (\"Player 2 wins! What a tricky word!\")\n\n# tells the player (s)he lost to the computer\ndef you_lose() :\n print (\"You lose! Better luck next time.\")\n\n# tells the player (s)he defeated the computer\ndef you_win():\n print (\"You win! 
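On the `oanda_rest_api.py` record that closes above, two small conventions are easy to miss: locally generated order ids carry an `a` prefix, and `cancel_order` addresses such ids through OANDA's client-id form by prepending `@` to the id in the REST path. A stripped-down hedged sketch of just that id handling (everything outside the two patterns shown in the record is my assumption):

```python
import time
from itertools import count

_counter = count(1_000_000)
_connect_time = int(time.strftime("%y%m%d%H%M%S"))

def new_local_order_id() -> str:
    # Mirrors the record: 'a' + connect timestamp + running counter.
    return f"a{_connect_time}{next(_counter)}"

def to_cancel_path_id(order_id: str) -> str:
    # Local ('a'-prefixed) ids are client ids and are addressed as '@<id>'
    # in the cancel path; server-assigned ids are used as-is.
    return "@" + order_id if order_id.startswith("a") else order_id

oid = new_local_order_id()
assert to_cancel_path_id(oid).startswith("@a")
```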
Stellar deductions.\")\n\n# asks if two or one players will be playing, and returns the boolean answer\ndef ask_if_multiplayer():\n while True:\n line = input(\"Enter the number of players (1 or 2): \")\n if (line.strip() == \"1\"):\n return False\n if (line.strip() == \"2\"):\n return True\n print(\"You must enter a valid response of \\\"1\\\" or \\\"2\\\" players.\")\n\n# requests the integer number of letters that is the length of the random word and returns it or zero (if length is to be unspecified)\ndef ask_number_letters():\n while True:\n line = input(\"Enter the length of the word to guess, or \\\"any\\\" if you would like it to be randomized: \")\n if (line.strip() == \"any\"):\n return 0\n try:\n value = int(line)\n if (value > 12):\n print(\"Sorry, I don't have any words that long to pick from. The longest words I have are 12 letters long.\")\n elif (value > 3):\n return value\n else:\n print(\"A word with which hangman is to be played must be at least 4 letters long.\")\n except ValueError:\n print(\"Uh oh, you didn't enter an integer or the word \\\"any\\\"! Try again.\")\n\n# asks if the user wants to play again or if they would like to exit and returns the boolean answer\ndef play_again():\n letters_already_guessed.clear()\n while True:\n line = input(\"Would you like to play again? (yes/no) \")\n if (line.strip() == \"yes\"):\n return True\n if (line.strip() == \"no\"):\n return False\n print(\"You must answer with either \\\"yes\\\" or \\\"no\\\".\")\n\n# asks if the player would like to use the same settings as the last game and returns the boolean answer\ndef change_settings():\n while True:\n line = input(\"Would you like to change the game settings? (yes/no) \")\n if (line.strip() == \"yes\"):\n return True\n if (line.strip() == \"no\"):\n return False\n print(\"You must answer with either \\\"yes\\\" or \\\"no\\\".\")","repo_name":"Randall-Scharpf/TurtleHangman","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30566762426","text":"import json\nimport logging\nimport os\nimport random\nimport string\nimport time\n\nimport requests\nimport yaml\nfrom configs.wfmconfig import config_file_url, tool_blockmerger, tool_tokeniser, tool_fileconverter, tool_aligner, tool_translator\nfrom configs.wfmconfig import tool_worddetector, tool_layoutdetector, tool_ch, tool_nmt, tool_ocrgooglevision, tool_ocrtesseract, tool_annotator\nfrom configs.wfmconfig import tool_blocksegmenter, tool_ocrdd10googlevision, tool_ocrdd15googlevision, \\\n jobid_random_str_length, tool_ocrtokeniser, tool_filetranslator, tool_imageocr, tool_ocrdd20tesseract\nfrom repository.wfmrepository import WFMRepository\nfrom anuvaad_auditor.loghandler import log_exception, log_error, log_info\n\nfrom tools.aligner import Aligner\nfrom tools.tokeniser import Tokeniser\nfrom tools.file_converter import FileConverter\nfrom tools.block_merger import BlockMerger\nfrom tools.translator import Translator\nfrom tools.contenthandler import ContentHandler\nfrom tools.nmt import NMT\nfrom tools.word_detector import WordDetector\nfrom tools.layout_detector import LayoutDetector\nfrom tools.ocr_gv import OCRGV\nfrom tools.ocr_dd10_gv import OCRDD10GV\nfrom tools.ocr_dd15_gv import OCRDD15GV\nfrom tools.ocr_tesseract import OCRTESS\nfrom tools.block_segmenter import BlockSegmenter\nfrom tools.ocr_tokeniser import OCRTokeniser\nfrom tools.annotator import Annotator\nfrom 
tools.file_translator import FileTranslator\nfrom tools.image_ocr import ImageOCR\nfrom tools.ocr_dd20 import OCRDD20\n\n\naligner = Aligner()\ntokeniser = Tokeniser()\nfile_converter = FileConverter()\nblock_merger = BlockMerger()\ntranslator = Translator()\nch = ContentHandler()\nnmt = NMT()\nword_detector = WordDetector()\nlayout_detector = LayoutDetector()\nocrgv = OCRGV()\nocrdd10gv = OCRDD10GV()\nocrdd15gv = OCRDD15GV()\nocrtess = OCRTESS()\nocrdd20tess = OCRDD20()\nblock_segmenter = BlockSegmenter()\nocr_tokeniser = OCRTokeniser()\nannotator = Annotator()\nfile_translator = FileTranslator()\nimage_ocr = ImageOCR()\nwfmrepo = WFMRepository()\n\nlog = logging.getLogger('file')\nconfigs_global = {}\n\nyaml_file_loc = \"/app/configs\"\nyam_file_path_delimiter = \"/\"\nyaml_file_name = \"wfconfig.yml\"\n\n\nclass WFMUtils:\n def __init__(self):\n pass\n\n # Reads config yaml file located at a remote location.\n # Converts the configs into dict, makes it available for the entire app.\n def read_all_configs(self):\n try:\n file = requests.get(config_file_url, allow_redirects=True)\n file_path = yaml_file_loc + yam_file_path_delimiter + yaml_file_name\n open(file_path, 'wb').write(file.content)\n with open(file_path, 'r') as stream:\n parsed = yaml.safe_load(stream)\n configs = parsed['WorkflowConfigs']\n for obj in configs:\n key = obj['workflowCode']\n configs_global[key] = obj\n except Exception as exc:\n log_exception(\"Exception while reading configs: \" +\n str(exc), None, exc)\n\n # Method that returns configs\n def get_configs(self):\n return configs_global\n\n # Method to pick all the output topics from the config\n # This includes all the workflows defined in that file.\n # The WFM consumer will listen to these topics only.\n def fetch_output_topics(self, all_configs):\n topics = []\n for key in all_configs:\n config = all_configs[key]\n sequence = config[\"sequence\"]\n for step in sequence:\n tool_details = step[\"tool\"][0]\n if 'kafka-output' in tool_details.keys():\n output_topic = os.environ.get(\n tool_details[\"kafka-output\"][0][\"topic\"], \"NA\")\n if output_topic != \"NA\" and output_topic not in topics:\n topics.append(output_topic)\n return topics\n\n # Helper method to fetch tools involved in a given workflow.\n def get_tools_of_wf(self, workflowCode):\n configs = self.get_configs()\n config = configs[workflowCode]\n sequence = config[\"sequence\"]\n tools = []\n for step in sequence:\n tool = step[\"tool\"][0][\"name\"]\n if tool not in tools:\n tools.append(tool)\n return tools\n\n # Generates unique job id.\n # Format: -<6_char_random>-<13_digit_epoch>\n def generate_job_id(self, workflowCode):\n config = self.get_configs()\n config_to_be_used = config[workflowCode]\n use_case = config_to_be_used[\"useCase\"]\n rand_str = ''.join(random.choice(string.ascii_letters)\n for i in range(eval(str(jobid_random_str_length))))\n return use_case + \"-\" + str(rand_str) + \"-\" + str(time.time()).replace('.', '')[0:13]\n\n # Fetches the order of execution for a given workflow.\n def get_order_of_exc(self, workflowCode):\n order_of_exc_dict = {}\n config = self.get_configs()\n config_to_be_used = config[workflowCode]\n sequence = config_to_be_used[\"sequence\"]\n for step in sequence:\n order_of_exc_dict[step[\"order\"]] = step\n return order_of_exc_dict\n\n # Returns the input required for the current tool to execute.\n # current_tool = The tool of which the input is to be computed.\n # previous tool = Previous tool which got executed and produced 'task_output'.\n # 
wf_input = Input received during initiation of wf.\n def get_tool_input_async(self, current_tool, previous_tool, task_output, wf_input):\n tool_input = None\n ocr_tools = [tool_worddetector, tool_layoutdetector, tool_ocrgooglevision,\n tool_ocrdd10googlevision, tool_ocrdd15googlevision, tool_ocrdd20tesseract, tool_ocrtesseract, tool_blocksegmenter]\n if wf_input is None:\n if current_tool == tool_aligner:\n tool_input = aligner.get_aligner_input(\n task_output, previous_tool)\n if current_tool == tool_tokeniser:\n tool_input = tokeniser.get_tokeniser_input(\n task_output, previous_tool)\n if current_tool == tool_fileconverter:\n tool_input = file_converter.get_fc_input(\n task_output, previous_tool)\n if current_tool == tool_blockmerger:\n tool_input = block_merger.get_bm_input(\n task_output, previous_tool)\n if current_tool == tool_translator:\n tool_input = translator.get_translator_input(\n task_output, previous_tool, False)\n job_details, files = self.get_job_details(\n task_output[\"jobID\"])[0], []\n for file in tool_input[\"input\"][\"files\"]:\n file[\"model\"] = job_details[\"input\"][\"files\"][0][\"model\"]\n if 'context' in job_details[\"input\"][\"files\"][0].keys():\n file[\"context\"] = job_details[\"input\"][\"files\"][0][\"context\"]\n files.append(file)\n tool_input[\"input\"][\"files\"] = files\n if current_tool == tool_worddetector:\n tool_input = word_detector.get_wd_input(\n task_output, previous_tool)\n if current_tool == tool_layoutdetector:\n tool_input = layout_detector.get_ld_input(\n task_output, previous_tool)\n if current_tool == tool_blocksegmenter:\n tool_input = block_segmenter.get_bs_input(\n task_output, previous_tool)\n if current_tool == tool_ocrtesseract:\n tool_input = ocrtess.get_octs_input(task_output, previous_tool)\n if current_tool == tool_ocrgooglevision:\n tool_input = ocrgv.get_ogv_input(task_output, previous_tool)\n if current_tool == tool_ocrdd10googlevision:\n tool_input = ocrdd10gv.get_odd10gv_input(\n task_output, previous_tool)\n if current_tool == tool_ocrdd15googlevision:\n tool_input = ocrdd15gv.get_odd15gv_input(\n task_output, previous_tool)\n if current_tool == tool_ocrdd20tesseract:\n tool_input = ocrdd20tess.get_odd20_input(\n task_output, previous_tool)\n if current_tool == tool_ocrtokeniser:\n tool_input = ocr_tokeniser.get_ocr_tokeniser_input(\n task_output, previous_tool)\n if current_tool == tool_filetranslator:\n tool_input = file_translator.get_ft_input(task_output)\n if current_tool == tool_imageocr:\n tool_input = image_ocr.get_image_ocr_input(task_output)\n if current_tool in ocr_tools:\n job_details = self.get_job_details(task_output[\"jobID\"])[0]\n for file in tool_input[\"input\"][\"inputs\"]:\n file[\"config\"] = job_details[\"input\"][\"files\"][0][\"config\"]\n else:\n if current_tool == tool_aligner:\n tool_input = aligner.get_aligner_input_wf(wf_input)\n if current_tool == tool_tokeniser:\n tool_input = tokeniser.get_tokeniser_input_wf(wf_input, False)\n if current_tool == tool_fileconverter:\n tool_input = file_converter.get_fc_input_wf(wf_input)\n if current_tool == tool_blockmerger:\n tool_input = block_merger.get_bm_input_wf(wf_input)\n if current_tool == tool_translator:\n tool_input = translator.get_translator_input_wf(\n wf_input, False)\n if current_tool == tool_worddetector:\n tool_input = word_detector.get_wd_input_wf(wf_input)\n if current_tool == tool_layoutdetector:\n tool_input = layout_detector.get_ld_input_wf(wf_input)\n if current_tool == tool_ocrgooglevision:\n tool_input = 
ocrgv.get_ogv_input_wf(wf_input)\n if current_tool == tool_ocrdd10googlevision:\n tool_input = ocrdd10gv.get_odd10gv_input_wf(wf_input)\n if current_tool == tool_ocrdd15googlevision:\n tool_input = ocrdd15gv.get_odd15gv_input_wf(wf_input)\n if current_tool == tool_ocrdd20tesseract:\n tool_input = ocrdd20tess.get_odd20_input_wf(wf_input)\n if current_tool == tool_ocrtesseract:\n tool_input = ocrtess.get_octs_input_wf(wf_input)\n if current_tool == tool_blocksegmenter:\n tool_input = block_segmenter.get_bs_input_wf(wf_input)\n if current_tool == tool_ocrtokeniser:\n tool_input = ocr_tokeniser.get_ocr_tokeniser_input_wf(\n wf_input, False)\n if current_tool == tool_annotator:\n tool_input = annotator.get_annotator_input_wf(wf_input)\n if current_tool == tool_filetranslator:\n tool_input = file_translator.get_ft_input_wf(wf_input)\n if current_tool == tool_imageocr:\n tool_input = image_ocr.get_image_ocr_input_wf(wf_input)\n return tool_input\n\n # Returns the input required for the current tool to execute for the sync process.\n # current_tool = The tool of which the input is to be computed.\n # previous tool = Previous tool which got executed and produced 'task_output'.\n # wf_input = Input received during initiation of wf.\n\n def get_tool_input_sync(self, current_tool, previous_tool, task_output, wf_input):\n tool_input = {}\n if wf_input is None:\n if current_tool == tool_tokeniser:\n tool_input = tokeniser.get_tokeniser_input(\n task_output, previous_tool)\n if current_tool == tool_translator:\n tool_input = translator.get_translator_input(\n task_output, previous_tool, True)\n job_details = self.get_job_details(task_output[\"jobID\"])[0]\n\n if translator.is_contains_list_of_paragraphs(task_output=task_output) is False:\n tool_input[\"input\"][\"model\"] = job_details[\"input\"][\"model\"]\n\n if 'modifiedSentences' in job_details[\"input\"].keys():\n tool_input[\"input\"][\"modifiedSentences\"] = job_details[\"input\"][\"modifiedSentences\"]\n if 'context' in job_details[\"input\"].keys():\n tool_input[\"input\"][\"context\"] = job_details[\"input\"][\"context\"]\n if 'retranslate' in job_details[\"input\"].keys():\n tool_input[\"input\"][\"retranslate\"] = job_details[\"input\"][\"retranslate\"]\n else:\n if current_tool == tool_tokeniser:\n tool_input = tokeniser.get_tokeniser_input_wf(wf_input, True)\n if current_tool == tool_translator:\n tool_input = translator.get_translator_input_wf(wf_input, True)\n if current_tool == tool_ch:\n tool_input = ch.get_ch_update_req(wf_input)\n if current_tool == tool_nmt:\n tool_input = nmt.get_nmt_it_req(wf_input)\n if current_tool == tool_filetranslator:\n tool_input = file_translator.get_ft_input_wf(wf_input)\n return tool_input\n\n # Util method to make an API call and fetch the result\n def call_api(self, uri, api_input, user_id):\n try:\n log_info(\"URI: \" + uri, None)\n api_headers = {'userid': user_id, 'x-user-id': user_id,\n 'Content-Type': 'application/json'}\n response = requests.post(\n url=uri, json=api_input, headers=api_headers)\n if response is not None:\n if response.text is not None:\n log_info(response.text, None)\n data = json.loads(response.text)\n return data\n else:\n log_error(\"API response was None !\", api_input, None)\n return None\n else:\n log_error(\"API call failed!\", api_input, None)\n return None\n except Exception as e:\n log_exception(\n \"Exception while making the api call: \" + str(e), api_input, e)\n return None\n\n # Method to search jobs on job id for internal logic.\n def get_job_details(self, 
job_id):\n        query = {\"jobID\": job_id}\n        exclude = {'_id': False}\n        return wfmrepo.search_job(query, exclude, None, None)\n","repo_name":"Abhilash-tarento/anuvaad","sub_path":"anuvaad-etl/anuvaad-workflow-mgr/etl-wf-manager/utilities/wfmutils.py","file_name":"wfmutils.py","file_ext":"py","file_size_in_byte":14479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"74492781673","text":"'''\nPrepping UCR\n\nThe srs_top_cities.dta comes from Jacob Kaplan's\nreplication files at\n\nhttps://www.openicpsr.org/openicpsr/project/176021/version/V2/view\n'''\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nandy_theme = {'axes.grid': True,\n              'grid.linestyle': '--',\n              'legend.framealpha': 1,\n              'legend.facecolor': 'white',\n              'legend.shadow': True,\n              'legend.fontsize': 14,\n              'legend.title_fontsize': 16,\n              'xtick.labelsize': 14,\n              'ytick.labelsize': 14,\n              'axes.labelsize': 16,\n              'axes.titlesize': 20,\n              'figure.dpi': 100}\n\nmatplotlib.rcParams.update(andy_theme)\n\n# load in data, this is via Kaplan's replication files\n# UCR data\ncrimes = pd.read_stata('srs_top_cities.dta')\n\n# Only keep agencies in Hogan top 100 sample\ncrimes = crimes[crimes['type'] != ''].copy()\n\n# only keep full reporting\ncrimes = crimes[crimes['number_of_months_missing'] == 0].copy()\n\n# Only keep agencies with full 20 years of data\n# I am keeping this in, as many of these cities have weird data values\n# Birmingham 2019, Raleigh 2019, Winston-Salem 2019\n# easier just to eliminate\ncity_20 = crimes['city'].value_counts().reset_index().sort_values('city')\ncity_20 = city_20['index'][city_20.city == 20].tolist()\n\n# This eliminates (NC cities only NIBRS recent maybe?)\n# 99 Louisville 16\n# 98 Raleigh 16\n# 96 Birmingham 18\n# 95 Chesapeake, VA 18\n# 94 Honolulu 18\n# 93 Winston-Salem 18\n# 97 North Las Vegas 18\n# 86 Oakland/Alameda County 19\n# 88 Greensboro/Guilford County 19\n# 87 Lincoln, Neb./Lancaster County 19\n# 90 Newark 19\n# 91 Portland/Multnomah County 19\n# 92 New Orleans 19\n# 89 Durham 19\n\ncrimes = crimes[crimes['city'].isin(city_20)].copy()\n\n# Eliminate progressive cities that are not Philly\n#not_prog = (crimes['type'] != 'Progressive') | (crimes['city'] == 'Philadelphia')\n#crimes = crimes[not_prog].copy()\n\n# Prep final data\n# Replace the Philly numbers with Hogan's, 2010-2019\nphilly_recent = [306,\n                 326,\n                 331,\n                 246,\n                 248,\n                 280,\n                 277,\n                 315,\n                 353,\n                 356]\n\nPhilly = (crimes['city'] == 'Philadelphia') & (crimes['year'] >= 2010)\n\ncrimes['hom'] = crimes[['actual_murder', 'actual_manslaughter']].sum(axis=1)\ncrimes.loc[Philly,'hom'] = philly_recent\ncrimes['hom_rate'] = (crimes['hom']/crimes['population'])*100000\ncrimes['Philly'] = 1*(crimes['city'] == 'Philadelphia')\n\nkeep_fields = ['city','ori','year','population',\n               'hom','hom_rate','type','Philly']\n\ncrimes = crimes[keep_fields].sort_values(by=['city','year'],ignore_index=True)\n\nint_fields = ['year','population','hom','Philly']\ncrimes[int_fields] = crimes[int_fields].astype(int)\n\n# This is still a total of 71 possible donors\n#crimes.to_csv('PrepUCR.csv',index=False)\n\n#crimes['year'] = crimes['year'].astype(int)\ncrimes.sort_values(by=['Philly','city','year'], inplace=True, ignore_index=True)\n\n# Check out this file on the GitHub page\ncrimes.to_csv('TopCitiesHomRate.csv',index=False)\n\n# This produces 100 separate plots, not quite what I 
want!\n#crimes.groupby('city').plot(x='year', y='hom_rate', kind='line')\n\n# Homicide rates, all cities\nyear_lab = np.arange(2000,2020,2)\nun_cities = list(pd.unique(crimes['city']))\n\nfig, ax = plt.subplots(figsize=(8,6))\n\nfor c in un_cities:\n sd = crimes[crimes['city'] == c]\n lab = 0\n if c == 'Philadelphia':\n ax.plot(sd['year'], sd['hom_rate'], label='Philly', color='red', linewidth=2)\n elif c == un_cities[0]:\n ax.plot(sd['year'], sd['hom_rate'], label='Other City', color='grey', linewidth=1)\n else:\n ax.plot(sd['year'], sd['hom_rate'], color='grey', linewidth=1)\n\nax.set_axisbelow(True)\nax.set_xticks(year_lab)\nax.set_title('Homicide Rate per 100,000')\nax.legend(loc='upper right')\nplt.show()\n\n\n# Homicide counts, all cities\n\nfig, ax = plt.subplots(figsize=(8,6))\n\nfor c in un_cities:\n sd = crimes[crimes['city'] == c]\n lab = 0\n if c == 'Philadelphia':\n ax.plot(sd['year'], sd['hom'], label='Philly', color='red', linewidth=2)\n elif c == un_cities[0]:\n ax.plot(sd['year'], sd['hom'], label='Other City', color='grey', linewidth=1)\n else:\n ax.plot(sd['year'], sd['hom'], color='grey', linewidth=1)\n\nax.set_axisbelow(True)\nax.set_xticks(year_lab)\nax.set_title('Homicide Count')\n#ax.legend(loc='upper right')\nax.legend(bbox_to_anchor=(1.05,0.5))\nplt.show()\n\n\n# Weighted aggregate rate, Philly and prosecutor type\nct = crimes.groupby(['Philly','type','year'], as_index=False)['hom','population'].sum()\nct['hom_rate'] = (ct['hom']/ct['population'])*100000\n\nrep_dict = {'0Traditional': 'Traditional',\n '0Middle': 'Middle',\n '0Progressive': 'Progressive',\n '1Progressive': 'Philly'}\n\nct['group'] = (ct['Philly'].astype(str) + ct['type']).replace(rep_dict)\n\n# Now lets do for each a line\ncolors = ['#ca0020',\n '#f4a582',\n '#92c5de',\n '#0571b0']\n\nfig, ax = plt.subplots(figsize=(8,6))\n\nfor g,c in zip(rep_dict.values(),colors):\n sd = ct[ct['group'] == g]\n ax.plot(sd['year'], sd['hom_rate'], label=g, color=c)\n\nax.set_axisbelow(True)\nax.set_xticks(year_lab)\nax.set_title('Homicide Rate per 100,000')\nax.legend(loc='upper right')\nplt.show()\n\n","repo_name":"apwheele/Blog_Code","sub_path":"Python/SynthEstimates/prepUCR.py","file_name":"prepUCR.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"23479244490","text":"# CamJam EduKit 3 - Robotics\n# Worksheet 8 - Line Following Robot\n\nimport time # Import the Time library\nfrom gpiozero import Robot, LineSensor # Import the GPIO Zero Library\n\n# Set variables for the line detector GPIO pin\npinLineFollowerRight = 11\npinLineFollowerLeft = 8\n\nlinesensorright = LineSensor(pinLineFollowerRight)\nlinesensorleft = LineSensor(pinLineFollowerLeft)\nrobot = Robot(left=(17,18), right=(22,23))\n\n# Set the relative speeds of the two motors, between 0.0 and 1.0\nleftmotorspeed = 0.36\nrightmotorspeed = 0.33\n\nmotorforward = (leftmotorspeed, rightmotorspeed)\nmotorbackward = (-leftmotorspeed, -rightmotorspeed)\nmotorleft = (leftmotorspeed, 0)\nmotorright = (0, rightmotorspeed)\n\nrightisoverblack = False # A flag to say the robot can see a black line\nleftisoverblack = False # A flag to say the robot can see a black line\nlinelost = False # A flag that is set if the line has been lost\n\n\n# Define the functions that will be called when the line is\n# detected or not detected\ndef linenotseenright():\n global rightisoverblack, linelost\n print(\"The right white has been found.\")\n rightisoverblack = False\n 
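Returning to the `prepUCR.py` record, which closes just above (before the CamJam line-follower record begins): the commented-out `groupby('city').plot(...)` draws one figure per city, which the author rejects, and the loops that follow reuse a single axes instead. The same effect is available in a couple of lines via a pivot. A hedged sketch, assuming the `crimes` frame from that record (with 'Philadelphia' present as a city, as it is there):

```python
import matplotlib.pyplot as plt

# One line per city on a single axes: pivot to a (year x city) table first.
wide = crimes.pivot(index='year', columns='city', values='hom_rate')
ax = wide.plot(legend=False, color='grey', linewidth=1, figsize=(8, 6))
wide['Philadelphia'].plot(ax=ax, color='red', linewidth=2, label='Philly')
ax.legend(loc='upper right')
plt.show()
```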
linelost = False\n robot.value = motorforward\n\n\ndef lineseenright():\n global rightisoverblack\n print(\"The right white has been lost.\")\n rightisoverblack = True\n\n# Define the functions that will be called when the line is\n# detected or not detected\ndef linenotseenleft():\n global leftisoverblack, linelost\n print(\"The left white has been found.\")\n leftisoverblack = False\n linelost = False\n robot.value = motorforward\n\n\ndef lineseenleft():\n global leftisoverblack\n print(\"The left white has been lost.\")\n leftisoverblack = True\n\n\n# Search for the white\ndef seeklineright():\n global linelost\n robot.stop()\n\n print(\"Seeking white right\")\n\n seektime = 0.15 # Turn for 0.25s\n seekcount = 1 # A count of times the robot has looked for the line\n maxseekcount = 10 # The maximum time to seek the line in one direction\n\n # Turn the robot left and right until it finds the line\n # Or we have looked long enough\n while seekcount <= maxseekcount:\n\n # Start the motors turning in a direction\n robot.value = motorright\n\n # Save the time it is now\n starttime = time.time()\n\n # While the robot is turning for seektime seconds,\n # check to see whether the line detector is over black\n while (time.time() - starttime) <= seektime:\n if not rightisoverblack:\n robot.value = motorforward\n # Exit the seekline() function returning\n # True - the line was found\n return True\n\n # The robot has not found the black line yet, so stop\n robot.stop()\n\n # Increase the seek count\n seekcount += 1\n\n # The line wasn't found, so return False\n robot.stop()\n print(\"The right line has been lost - relocate your robot\")\n linelost = True\n return False\n\n# Search for the white\ndef seeklineleft():\n global linelost\n robot.stop()\n\n print(\"Seeking white left\")\n\n seektime = 0.15 # Turn for 0.25s\n seekcount = 1 # A count of times the robot has looked for the line\n maxseekcount = 10 # The maximum time to seek the line in one direction\n\n # Turn the robot left and right until it finds the line\n # Or we have looked long enough\n while seekcount <= maxseekcount:\n\n # Start the motors turning in a direction\n robot.value = motorleft\n\n # Save the time it is now\n starttime = time.time()\n\n # While the robot is turning for seektime seconds,\n # check to see whether the line detector is over black\n while (time.time() - starttime) <= seektime:\n if not leftisoverblack:\n robot.value = motorforward\n # Exit the seekline() function returning\n # True - the line was found\n return True\n\n # The robot has not found the black line yet, so stop\n robot.stop()\n\n # Increase the seek count\n seekcount += 1\n\n # The line wasn't found, so return False\n robot.stop()\n print(\"The left line has been lost - relocate your robot\")\n linelost = True\n return False\n\n\n# Tell the program what to do with a line is seen\nlinesensorright.when_line = lineseenright\n# And when no line is seen\nlinesensorright.when_no_line = linenotseenright\n\n\n# Tell the program what to do with a line is seen\nlinesensorleft.when_line = lineseenleft\n# And when no line is seen\nlinesensorleft.when_no_line = linenotseenleft\n\ntry:\n # repeat the next indented block forever\n robot.value = motorforward\n while True:\n if rightisoverblack and not linelost:\n seeklineright()\n if leftisoverblack and not linelost:\n seeklineleft()\n\n\n# If you press CTRL+C, cleanup and stop\nexcept KeyboardInterrupt:\n 
exit()","repo_name":"mathFonseca/Oficinas_2","sub_path":"arquivos-teste/sensores_plus_motores.py","file_name":"sensores_plus_motores.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40165701842","text":"import socket\n\nfrom Projects.EllaConsole.PrintFunctions import Print\n\nUDP_IP = \"127.0.0.1\"\nUDP_PORT = 515\n\nsock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\nsock.bind((UDP_IP, UDP_PORT))\n\n\nwhile True:\n\n data = None\n try:\n data, addr = sock.recvfrom(100*1024) # buffer size is 1024 bytes\n except:\n data = \"[error]:\" + \"Message is bigger than buffer\"\n\n concatString = \"\"\n\n data = str(data)[2:-1]\n Print(data)\n\n\n","repo_name":"edgecom/EllaDevelopmentTools","sub_path":"EllaConsole/EllaConsole.py","file_name":"EllaConsole.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71310660394","text":"#!/Users/yuzhang/anaconda3/bin/python\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-n', '--name', help = 'the prefix')\nargs = parser.parse_args()\n\nname = args.name+'_'\ncount = 0\nfor filename in os.listdir(\"./\"):\n if name in filename and filename.endswith('.txt') and '_avg.txt' not in filename:\n if 'msd' in locals():\n temp = np.loadtxt(filename)\n if len(temp[0]) == len(msd[0]):\n msd[:, 1:] += temp[:, 1:]\n count = [i+1 for i in count]\n elif len(temp[0]) > len(msd[0]):\n print('Previous length:', len(msd[0]), end = ' ')\n print('Current length:', len(temp[0]))\n count = [i+1 for i in count]+[1 for _ in range(len(temp[0])-len(msd[0]))]\n msd[:, 1:] += temp[:, 1:len(msd[0])]\n msd = np.concatenate((msd, temp[:, len(msd[0]):]), axis = 1)\n else:\n print('Previous length:', len(msd[0]), end = ' ')\n print('Current length:', len(temp[0]))\n msd[:, 1:len(temp[0])] += temp[:, 1:]\n for i in range(len(temp[0])-1):\n count[i] += 1\n else:\n msd = np.loadtxt(filename)\n count = [1 for _ in range(len(msd[0])-1)]\nmsd[:, 1:] /= np.array(count)\n\nfig = plt.figure(figsize = (16,12), dpi = 300)\nfsize = 28\nlwidth = 4.0\nmsize = 16.0\n\ncolors = ['red', 'blue', 'green', 'pink']\n\npt_start = len(msd)//2\nfor i in range(1, len(msd[0])):\n plt.plot(msd[:, 0], msd[:, i], linewidth = lwidth, color = colors[i-1], label = 'mol '+str(i))\n coef = np.polyfit(msd[pt_start:, 0], msd[pt_start:, i], 1)\n print(coef*10000/4, ' E-10 m^2/s')\nfor axes in fig.axes:\n axes.tick_params(labelsize = fsize, width = 2, length = 6, pad = 10)\n for direction in ['top', 'bottom', 'left', 'right']:\n axes.spines[direction].set_linewidth(2)\nplt.xlabel('Time (ps)', fontsize = fsize)\nplt.ylabel('MSD ($\\mathregular{nm^2\\!}$)', fontsize = fsize)\nplt.legend(loc = 'best', fontsize = fsize)\nplt.tight_layout()\nplt.savefig(name+'avg.pdf')\nnp.savetxt(name+'avg.txt', msd, fmt = ['%12.6f' for i in msd[0]])\n","repo_name":"YuZhangIsCoding/MD_scripts","sub_path":"combine_msd_all.py","file_name":"combine_msd_all.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15237904939","text":"nR=int(input())\r\nfor run in 
range(nR):\r\n\tdata=input().split()\r\n\tN=int(data[0])\r\n\tM=int(data[1])\r\n\tK=int(data[2])\r\n\t\r\n\tmini=K\r\n\tfor i in range(2,N+1):\r\n\t\tfor j in range(2,M+1):\r\n\t\t\tloss=0\r\n\t\t\trm=0\r\n\t\t\tfor k in range(min(i,j)//2):\r\n\t\t\t\tfor m in range(5):\r\n\t\t\t\t\tif 2*k==min(i,j) and m>2:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tif i*j-loss input_size:\n self.increase_dim = SPDIncreaseDim(input_size, output_size)\n input_size = output_size\n self.weight = StiefelParameter(torch.DoubleTensor(1,1,input_size, output_size).to(self.device),\n requires_grad=True)\n # self.weight = StiefelParameter(torch.DoubleTensor(input_size, output_size).to(self.device),\n # requires_grad=True)\n nn.init.orthogonal_(self.weight)\n #一会注释掉,attention的前向传播维度要对应\n #以前遇到过,一旦成为参数向量 就不能更改维度了,\n # self.weight=self.weight.unsqueeze(0).unsqueeze(0)\n\n def forward(self, input):\n output = input\n if self.increase_dim:\n output = self.increase_dim(output)\n #权重维度可能有问题 后面改 现在走到这里是1,1,200,100\n #正确的这里是200 ,100 所以去掉两个维度\n weight = self.weight.squeeze(0).squeeze(0).unsqueeze(0)\n # weight = self.weight.unsqueeze(0)\n weight = weight.expand(input.size(0), -1, -1)\n output = torch.bmm(weight.transpose(1, 2), torch.bmm(output, weight))\n\n return output\n\n class SPDIncreaseDim(nn.Module):\n\n def __init__(self, input_size, output_size):\n super(SPDIncreaseDim, self).__init__()\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.register_buffer('eye', torch.eye(output_size, input_size).to(self.device))\n add = torch.as_tensor([0] * input_size + [1] * (output_size - input_size), dtype=torch.float32)\n add = add.to(self.device)\n self.register_buffer('add', torch.diag(add))\n\n def forward(self, input):\n eye = self.eye.unsqueeze(0)\n eye = eye.expand(input.size(0), -1, -1)\n add = self.add.unsqueeze(0)\n add = add.expand(input.size(0), -1, -1)\n\n output = torch.baddbmm(add, eye, torch.bmm(input, eye.transpose(1, 2)))\n\n return output\n\n class AttentionManifold(nn.Module):\n def __init__(self, in_embed_size, out_embed_size):\n super(AttentionManifold, self).__init__()\n\n self.d_in = in_embed_size\n self.d_out = out_embed_size\n\n self.q_trans = SPDTransform(self.d_in, self.d_out).cpu()\n self.k_trans = SPDTransform(self.d_in, self.d_out).cpu()\n self.v_trans = SPDTransform(self.d_in, self.d_out).cpu()\n\n def tensor_log(self, t): # 4dim\n u, s, v = th.svd(t)\n return u @ th.diag_embed(th.log(s)) @ v.permute(0, 1, 3, 2)\n\n def tensor_exp(self, t): # 4dim\n # condition: t is symmetric!\n s, u = th.linalg.eigh(t)\n return u @ th.diag_embed(th.exp(s)) @ u.permute(0, 1, 3, 2)\n\n def log_euclidean_distance(self, A, B):\n inner_term = self.tensor_log(A) - self.tensor_log(B)\n inner_multi = inner_term @ inner_term.permute(0, 1, 3, 2)\n _, s, _ = th.svd(inner_multi)\n final = th.sum(s, dim=-1)\n return final\n\n def LogEuclideanMean(self, weight, cov):\n # cov:[bs, #p, s, s]\n # weight:[bs, #p, #p]\n bs = cov.shape[0]\n num_p = cov.shape[1]\n size = cov.shape[2]\n cov = self.tensor_log(cov).view(bs, num_p, -1)\n output = weight @ cov # [bs, #p, -1]\n output = output.view(bs, num_p, size, size)\n return self.tensor_exp(output)\n\n def forward(self, x, shape=None):\n if len(x.shape) == 3 and shape is not None:\n x = x.view(shape[0], shape[1], self.d_in, self.d_in)\n # x = x.to(th.float) # patch:[b, #patch, c, c]\n q_list = []\n k_list = []\n v_list = []\n # calculate Q K V\n bs = x.shape[0]\n m = x.shape[1]\n x = x.reshape(bs * m, self.d_in, self.d_in)\n Q = self.q_trans(x).view(bs, m, 
self.d_out, self.d_out)\n K = self.k_trans(x).view(bs, m, self.d_out, self.d_out)\n V = self.v_trans(x).view(bs, m, self.d_out, self.d_out)\n\n # calculate the attention score\n Q_expand = Q.repeat(1, V.shape[1], 1, 1)\n\n K_expand = K.unsqueeze(2).repeat(1, 1, V.shape[1], 1, 1)\n K_expand = K_expand.view(K_expand.shape[0], K_expand.shape[1] * K_expand.shape[2], K_expand.shape[3],\n K_expand.shape[4])\n\n atten_energy = self.log_euclidean_distance(Q_expand, K_expand).view(V.shape[0], V.shape[1], V.shape[1])\n atten_prob = nn.Softmax(dim=-2)(1 / (1 + th.log(1 + atten_energy))).permute(0, 2, 1) # now row is c.c.\n\n # calculate outputs(v_i') of attention module\n output = self.LogEuclideanMean(atten_prob, V)\n\n output = output.view(V.shape[0], V.shape[1], self.d_out, self.d_out)\n\n shape = list(output.shape[:2])\n shape.append(-1)\n\n # output = output.contiguous().view(-1, self.d_out, self.d_out)\n return output, shape\n #setup data and model\n class AfewNet(nn.Module):\n def __init__(self):\n super(__class__,self).__init__()\n dim=400\n dim1=200; dim2=100; dim3=50\n classes=7\n self.re=nn_spd.ReEig()\n # self.batchnorm1=nn_spd.BatchNormSPD(dim1)\n # self.bimap2=nn_spd.BiMap(1,1,dim1,dim2)\n # self.batchnorm2=nn_spd.BatchNormSPD(dim2)\n # self.bimap3=nn_spd.BiMap(1,1,dim2,dim3)\n # self.batchnorm3=nn_spd.BatchNormSPD(dim3)\n self.logeig=nn_spd.LogEig()\n self.linear=nn.Linear(dim3**2,classes).double()\n\n # self.bimap6 = nn_spd.BiMap(1,1,400,100)\n # self.batchnorm5 = nn_spd.BatchNormSPD(100)\n # self.bimap4 = nn_spd.BiMap(1,1,200,400)\n # self.batchnorm4 =nn_spd.BatchNormSPD(400)\n # self.bimap5 = nn_spd.BiMap(1, 1, 400, 200)\n # self.batchnorm5 = nn_spd.BatchNormSPD(200)\n self.att1 = AttentionManifold(200, 100)\n self.bimap11 = nn_spd.BiMap(1,1,400,200)\n self.bimap12 = nn_spd.BiMap(1,1,400,200)\n self.bimap13 = nn_spd.BiMap(1,1,400,200)\n self.bimap2 = nn_spd.BiMap(1,1,100,50)\n self.bn1 = nn_spd.BatchNormSPD(200)\n self.bn2 = nn_spd.BatchNormSPD(200)\n self.bn3 = nn_spd.BatchNormSPD(200)\n def forward(self,x):\n #30 1 400 400\n #生成3个进入atten\n #400->200\n x11 = self.re(self.bn1(self.bimap11(x)))\n x12 = self.re(self.bn2(self.bimap12(x)))\n x13 = self.re(self.bn3(self.bimap13(x)))\n x1 = torch.stack([x11,x12,x13]).squeeze(2).permute(1, 0, 2, 3)\n #经过att1变成float了 是不是att参数的问题\n x, shape = self.att1(x1)\n x = (torch.sum(x,dim=1).unsqueeze(1))/3\n x_spd=self.re((self.bimap2(x)))\n # x_spd_new = self.re(self.batchnorm4(self.bimap4(x_spd)))\n # x_spd_new = self.batchnorm5(self.bimap5(x_spd_new))\n #\n # x_spd = x_spd + x_spd_new\n #200->100\n # x_spd=self.re((self.bimap2(x_spd)))\n\n # x_spd = x_spd + self.re((self.bimap6(x)))\n\n #100->50\n # x_spd=self.batchnorm3(self.bimap3(x_spd))\n x_vec=self.logeig(x_spd).view(x_spd.shape[0],-1)\n y=self.linear(x_vec)\n return y\n model=AfewNet()\n\n #setup loss and optimizer\n loss_fn = nn.CrossEntropyLoss()\n opti = MixOptimizer(model.parameters(),lr=lr)\n\n #initial validation accuracy\n loss_val,acc_val=[],[]\n y_true,y_pred=[],[]\n gen = data_loader._test_generator\n model.eval()\n for local_batch, local_labels in gen:\n out = model(local_batch)\n l = loss_fn(out, local_labels)\n predicted_labels=out.argmax(1)\n y_true.extend(list(local_labels.cpu().numpy())); y_pred.extend(list(predicted_labels.cpu().numpy()))\n acc,loss=(predicted_labels==local_labels).cpu().numpy().sum()/out.shape[0], l.cpu().data.numpy()\n loss_val.append(loss)\n acc_val.append(acc)\n acc_val = np.asarray(acc_val).mean()\n loss_val = np.asarray(loss_val).mean()\n 
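A note on the geometry used throughout this AFEW/SPD record: `log_euclidean_distance` compares symmetric positive definite matrices through their matrix logarithms, and the batched version above returns the squared Frobenius norm of the log difference (the sum of singular values of `D @ D.T`). A tiny self-contained sketch of the same metric for a single pair, in plain PyTorch with no SPDNet layers (`th` is torch, as in the record; the eigendecomposition shortcut is my choice, valid because the inputs are SPD):

```python
import torch as th

def log_euclidean_distance(a, b):
    """||logm(A) - logm(B)||_F for symmetric positive definite A, B."""
    def logm(m):
        # For SPD input, eigh is valid and cheaper than a general SVD.
        s, u = th.linalg.eigh(m)
        return u @ th.diag(th.log(s)) @ u.T
    return th.linalg.norm(logm(a) - logm(b))

a = th.tensor([[2.0, 0.0], [0.0, 1.0]])
b = th.eye(2)
print(log_euclidean_distance(a, b))  # |log 2| ~ 0.6931
```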
print('Initial validation accuracy: '+str(100*acc_val)+'%')\n\n #training loop\n for epoch in range(epochs):\n\n # train one epoch\n loss_train, acc_train = [], []\n model.train()\n i=0\n for local_batch, local_labels in data_loader._train_generator:\n i=i+1\n time_start = time.time()\n opti.zero_grad()\n out = model(local_batch)\n l = loss_fn(out, local_labels)\n acc, loss = (out.argmax(1)==local_labels).cpu().numpy().sum()/out.shape[0], l.cpu().data.numpy()\n loss_train.append(loss)\n acc_train.append(acc)\n l.backward()\n opti.step()\n time_end = time.time()\n time_c= time_end - time_start\n print(\"the %d time cost %.2f loss:%.2f acc:%.2f\" % (i,float(time_c), float(loss), float(acc)))\n acc_train = np.asarray(acc_train).mean()\n loss_train = np.asarray(loss_train).mean()\n print(\"the %d epoch cost loss:%.2f acc:%.2f\" % (epoch, float(loss_train), float(acc_train)))\n writer.add_scalar(\"train_loss\", loss_train, epoch)\n writer.add_scalar(\"train_acc\",acc_train,epoch)\n writer.flush()\n # validation\n loss_val,acc_val=[],[]\n y_true,y_pred=[],[]\n gen = data_loader._test_generator\n model.eval()\n j=0\n for local_batch, local_labels in gen:\n j = j+1\n out = model(local_batch)\n l = loss_fn(out, local_labels)\n predicted_labels=out.argmax(1)\n y_true.extend(list(local_labels.cpu().numpy())); y_pred.extend(list(predicted_labels.cpu().numpy()))\n acc,loss=(predicted_labels==local_labels).cpu().numpy().sum()/out.shape[0], l.cpu().data.numpy()\n loss_val.append(loss)\n acc_val.append(acc)\n print(\"the %d loss:%.2f acc:%.2f\" % (i,float(loss), float(acc)))\n\n acc_val = np.asarray(acc_val).mean()\n loss_val = np.asarray(loss_val).mean()\n print('Val acc: ' + str(100*acc_val) + '% at epoch ' + str(epoch + 1) + '/' + str(epochs))\n print(\"the %d epoch loss:%.2f acc:%.2f\" % (epoch, float(loss_val), float(acc_val)))\n\n writer.add_scalar(\"val_loss\", loss_val, epoch)\n writer.add_scalar(\"val_acc\", acc_val, epoch)\n if epoch % 50 == 0:\n th.save(model.state_dict(),'attenepoch'+str(epoch)+'.pt')\n\n\n print('Final validation accuracy: '+str(100*acc_val)+'%')\n return acc_val\n\nif __name__ == \"__main__\":\n\n data_path='data/afew/'\n batch_size=30 #batch size\n\n class DatasetSPD(data.Dataset):\n def __init__(self, path, names):\n self._path = path\n self._names = names\n def __len__(self):\n return len(self._names)\n def __getitem__(self, item):\n x = np.load(self._path + self._names[item])[None, :, :].real\n x = th.from_numpy(x).double()\n y = int(self._names[item].split('.')[0].split('_')[-1])\n y = th.from_numpy(np.array(y)).long()\n return x,y\n\n class DataLoaderAFEW:\n def __init__(self,data_path,batch_size):\n path_train,path_test=data_path+'train/',data_path+'val/'\n for filenames in os.walk(path_train):\n names_train = sorted(filenames[2])\n for filenames in os.walk(path_test):\n names_test = sorted(filenames[2])\n self._train_generator=data.DataLoader(DatasetSPD(path_train,names_train),batch_size=batch_size,shuffle='True')\n self._test_generator=data.DataLoader(DatasetSPD(path_test,names_test),batch_size=batch_size,shuffle='False')\n\n afew(DataLoaderAFEW(data_path,batch_size))\n","repo_name":"a912289748/Grnet_building_deep_networks_on_grassmann_manifolds","sub_path":"experiments/afew_atten.py","file_name":"afew_atten.py","file_ext":"py","file_size_in_byte":13375,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10324138520","text":"import math\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as 
np\r\n\r\n\r\nnum = 1000\r\nX = [i / num for i in range(0, num)]\r\nZ = 0.1\r\nC_sold = []\r\nC_snew = []\r\nk = 0.1\r\nC_0 = 0.1\r\n#C_E = 0.4\r\nC_snewar = np.asarray([C_0 for i in range(0, num)])\r\nC_soldar = np.asarray([C_0 for i in range(0, num)])\r\nn = 8\r\n\r\ndef test(n, C_0, k, Z, num, C_snewar, C_soldar):\r\n\r\n '''设置初始浓度'''\r\n for i in range(1, n + 1):\r\n if n == 1:\r\n C_snewar[0] = C_0 * k\r\n else:\r\n C_snewar[0] = k * (1 / (Z*num)) * sum(C_soldar[:int(Z * num)])\r\n\r\n C_l = [C_0 for i in range(0, num)]\r\n C_lar = np.asarray(C_l)\r\n\r\n '''计算浓度分布'''\r\n for j in range(1,num):\r\n if j <= int((1-Z) * num):\r\n delt_C = C_soldar[j + int(Z * num) - 1] - C_snewar[j - 1]\r\n dC_l = delt_C / (Z * num)\r\n if j == 1:\r\n C_lar[j] = C_snewar[0] / k + dC_l\r\n else:\r\n C_lar[j] = C_lar[j - 1] + dC_l\r\n C_snewar[j] = k * C_lar[j]\r\n if j > int((1-Z) * num):\r\n C_lar[j] = C_lar[j - 1] * (num - j + 2 - 2 * k) / (num - j)\r\n C_snewar[j] = k * C_lar[j]\r\n\r\n '''交换新旧浓度'''\r\n C_soldar = C_snewar\r\n\r\n plt.plot(X[:950], C_snewar[:950])\r\n\r\n return C_snewar,C_soldar\r\n\r\ntest(n,C_0,k,Z,num,C_snewar,C_soldar)\r\nprint(sum(C_snewar[:899]))\r\nplt.show()\r\n\r\n\r\n","repo_name":"nigulasizhaosihe/zonerefing_final","sub_path":"zone refining.py","file_name":"zone refining.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13493604118","text":"import math\n\n\nclass Node:\n def __init__(self, value, nexxt=None):\n self.value = value\n self.next = nexxt\n\n def __repr__(self):\n return \"Node(value={}, next={})\".format(self.value, self.next)\n\n def __str__(self):\n return repr(self)\n\n def __len__(self):\n if not self.next:\n return 1\n return 1 + len(self.next)\n\n\ndef remove_duplicates_from_sorted_list(linked_list):\n initial = linked_list\n\n parent = linked_list\n linked_list = linked_list.next\n\n while linked_list:\n if linked_list.value == parent.value:\n temp = linked_list\n\n parent.next = linked_list.next\n linked_list = linked_list.next\n\n temp.next = None\n continue\n\n parent = linked_list\n linked_list = linked_list.next\n\n return initial\n\n\ndef shift_linked_list(linked_list: Node, k): # Verified on AlgoExpert\n k = k % len(linked_list)\n\n beg = linked_list\n end = linked_list\n for _ in range(k):\n end = end.next\n\n while end.next:\n beg = beg.next\n end = end.next\n\n end.next = linked_list\n initial = beg.next\n beg.next = None\n\n return initial\n\n\ndef reverse_linked_list(linked_list): # Verified\n prev = linked_list\n current = linked_list.next\n linked_list.next = None\n\n while current:\n temp = current.next\n current.next = prev\n prev = current\n current = temp\n\n return prev\n\n\ndef merge_linked_lists(list1, list2):\n result_list = temp = Node(\"/\")\n while list1 and list2:\n if list1.value < list2.value:\n temp.next = list1\n list1 = list1.next\n else:\n temp.next = list2\n list2 = list2.next\n temp = temp.next\n\n while list1:\n temp.next = list1\n temp = temp.next\n list1 = list1.next\n\n while list2:\n temp.next = list2\n temp = temp.next\n list2 = list2.next\n\n return result_list.next\n\n\ndef rearrange_linked_list(list1, k):\n current = list1\n less_start = less_than_k = Node(\"/\")\n more_start = more_than_k = Node(\"/\")\n equal_start = equal_to_k = Node(\"/\")\n while current:\n temp_next = current.next\n current.next = None\n\n if current.value == k:\n equal_to_k.next = current\n equal_to_k = current\n elif current.value < 
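On the zone-refining record that closes above (its section comments are Chinese: '设置初始浓度' = set the initial concentration, '计算浓度分布' = compute the concentration profile, '交换新旧浓度' = swap old and new concentrations): the final `print(sum(C_snewar[:899]))` is effectively a solute-conservation check, since a zone pass only redistributes solute along the bar except near the final-zone region. A hedged helper making that check explicit; the names are mine:

```python
import numpy as np

def solute_balance(profile, dx):
    """Approximate total solute: sum of concentrations times cell width.

    Away from the last-zone region this total should stay roughly
    constant from one zone pass to the next.
    """
    profile = np.asarray(profile, dtype=float)
    return float(profile.sum() * dx)

c0 = np.full(1000, 0.1)
print(solute_balance(c0, dx=1 / 1000))  # 0.1 for a uniform bar
```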
k:\n less_than_k.next = current\n less_than_k = current\n else:\n more_than_k.next = current\n more_than_k = current\n\n current = temp_next\n\n equal_to_k.next = more_start.next\n less_than_k.next = equal_start.next\n\n return less_start.next\n\n\ndef _split_linked_list(linked_list): # Verified\n slow = fast = linked_list\n\n while fast.next and fast.next.next:\n fast = fast.next.next\n slow = slow.next\n\n second_list = slow.next\n slow.next = None\n return linked_list, second_list\n\n\ndef zip_linked_list(list1):\n list1, list2 = _split_linked_list(list1)\n list2 = reverse_linked_list(list2) if list2 else None\n\n result_list = temp = Node(\"/\")\n while True:\n if not list1:\n break\n temp.next = list1\n list1 = list1.next\n temp = temp.next\n\n if not list2:\n continue\n\n temp.next = list2\n list2 = list2.next\n temp = temp.next\n\n return result_list.next\n\n\ndef linked_list_palindrome(linked_list): # Verified on Leetcode\n list1, list2 = _split_linked_list(linked_list)\n list2 = reverse_linked_list(list2) if list2 else None\n\n while True:\n if not list2:\n return True\n\n if list1.value != list2.value:\n return False\n\n list1 = list1.next\n list2 = list2.next\n\n\ndef remove_nth_node_from_end(linked_list, n: int): # Verified on Leetcode\n start = end = linked_list\n for _ in range(n - 1):\n end = end.next\n\n prev_node = None\n while end.next:\n prev_node = start\n start = start.next\n end = end.next\n\n if not prev_node:\n return linked_list.next\n prev_node.next = start.next\n\n return linked_list\n\ndef sum_of_linked_lists(list1, list2): # Verified on LeetCode\n result = temp = Node(\"/\")\n ptr1 = list1\n ptr2 = list2\n carry = 0\n\n while ptr1 and ptr2:\n new_value = ptr1.value + ptr2.value + carry\n carry = new_value // 10\n new_value = new_value % 10\n\n temp.next = Node(new_value)\n temp = temp.next\n\n ptr1 = ptr1.next\n ptr2 = ptr2.next\n\n while ptr1:\n new_value = ptr1.value + carry\n carry = new_value // 10\n new_value = new_value % 10\n\n temp.next = Node(new_value)\n temp = temp.next\n\n ptr1 = ptr1.next\n\n while ptr2:\n new_value = ptr2.value + carry\n carry = new_value // 10\n new_value = new_value % 10\n\n temp.next = Node(new_value)\n temp = temp.next\n\n ptr2 = ptr2.next\n\n if carry == 1:\n temp.next = Node(carry)\n\n return result.next\n\n\ndef main():\n linked_list = Node(1, Node(1, Node(2, Node(2, Node(3, Node(5, Node(5, Node(5))))))))\n print(\"Original: \", linked_list)\n linked_list = remove_duplicates_from_sorted_list(linked_list)\n print(linked_list)\n\n linked_list2 = Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7, Node(8, Node(9)))))))))\n print(\"Shifted Linked List: {}\".format(shift_linked_list(linked_list2, -2)))\n # print(\"Reverse Linked List: {}\".format(reverse_linked_list(linked_list2)))\n\n linked_list3 = Node(2, Node(6, Node(7, Node(8))))\n linked_list4 = Node(1, Node(3, Node(4, Node(4))))\n print(\"Merge Sorted Linked Lists: {}\".format(merge_linked_lists(linked_list3, linked_list4)))\n\n linked_list5 = Node(3, Node(0, Node(5, Node(3, Node(2, Node(1, Node(4, Node(-1))))))))\n print(\"Rearrange Linked List: {}\".format(rearrange_linked_list(linked_list5, 2.5)))\n\n print(\"Zip Linked List: {}\".format(zip_linked_list(linked_list2)))\n print(\"Linked List Palindrome: {}\".format(linked_list_palindrome(linked_list4)))\n\n linked_list6 = Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7, Node(8, Node(9)))))))))\n print(\"Remove nth Node from end of list: {}\".format(remove_nth_node_from_end(Node(1), 1)))\n\n num1 = Node(2, 
Node(4, Node(7, Node(1))))\n num2 = Node(9, Node(4, Node(5)))\n print(\"Sum of Linked Lists: {}\".format(sum_of_linked_lists(num1, num2)))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"InderdeepSync/algoexpert_problems","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17497088165","text":"syslog = __import__('syslog')\n\nimport log\n\nclass SyslogObserver:\n def __init__(self, prefix):\n syslog.openlog(prefix)\n\n def emit(self, eventDict):\n edm = eventDict['message']\n if not edm:\n if eventDict['isError'] and eventDict.has_key('failure'):\n text = eventDict['failure'].getTraceback()\n elif eventDict.has_key('format'):\n text = eventDict['format'] % eventDict\n else:\n # we don't know how to log this\n return\n else:\n text = ' '.join(map(str, edm))\n\n lines = text.split('\\n')\n while lines[-1:] == ['']:\n lines.pop()\n\n firstLine = 1\n for line in lines:\n if firstLine:\n firstLine=0\n else:\n line = '\\t%s' % line\n syslog.syslog('[%s] %s' % (eventDict['system'], line))\n\ndef startLogging(prefix='Twisted', setStdout=1):\n obs = SyslogObserver(prefix)\n log.startLoggingWithObserver(obs.emit, setStdout=setStdout)\n","repo_name":"ActiveState/OpenKomodoIDE","sub_path":"contrib/twisted/TwistedCore-2.4.0/twisted/python/syslog.py","file_name":"syslog.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"72"} +{"seq_id":"32666062545","text":"from setuptools import setup, find_packages\n\nversion = '0.1'\n\nsetup(\n name='ckanext-dgu-local',\n version=version,\n description=\"DGU Local Authority Datasets\",\n long_description=\"\"\"\\\n \"\"\",\n classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n keywords='',\n author='Ross Jones / David Read',\n author_email='david.read@hackneyworkshop.com',\n url='https://github.com/datagovuk/ckanext-dgu-local',\n license='GPL3',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n namespace_packages=['ckanext', 'ckanext.dgulocal'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"requests>=1.1.0\",\n \"lxml>=2.2.4\",\n \"GeoAlchemy>=0.6\",\n \"Shapely>=1.2.13\"\n ],\n entry_points=\\\n \"\"\"\n [ckan.plugins]\n # dgu_local will provide any UI/search enhancements\n dgu_local=ckanext.dgulocal.plugin:LocalPlugin\n\n # inventory_harvester is used for harvesting from the Inventory format.\n inventory_harvester=ckanext.dgulocal.harvester:InventoryHarvester\n\n [paste.paster_command]\n dgulocal=ckanext.dgulocal.commands:Command\n \"\"\",\n)\n","repo_name":"datagovhr/data.gov.hr","sub_path":"src/ckanext-dgu-local/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"9622540699","text":"from PlayerMissile import *\nimport RunningValues\nfrom Collision import Collision\nimport Explosion\nimport Sound\n\n\nclass PlayerTurret(Collision):\n missile = 0\n\n time_since_last_fire = 0\n\n def __init__(self, canvas: Canvas, width, height):\n\n self.height = 40\n self.width = 50\n self.canvas = canvas\n\n self.x = width / 2 + self.width / 2\n self.y = height - self.height * 2\n self.shape = canvas.create_rectangle(self.x, self.y, self.x + self.width, self.y + self.height,\n fill=\"purple\")\n\n def move(self, 
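On the `linked_list.py` record above: `sum_of_linked_lists` stores digits least-significant first, so `Node(2, Node(4, Node(7, Node(1))))` encodes 1742 and `Node(9, Node(4, Node(5)))` encodes 549, giving 2291, i.e. the list 1 -> 9 -> 2 -> 2. A hedged round-trip checker reusing that record's `Node` and `sum_of_linked_lists`; the two helpers are mine:

```python
def from_int(n):
    """Build a least-significant-digit-first list from a non-negative int."""
    head = tail = Node(n % 10)
    n //= 10
    while n:
        tail.next = Node(n % 10)
        tail = tail.next
        n //= 10
    return head

def to_int(node):
    total, place = 0, 1
    while node:
        total += node.value * place
        place *= 10
        node = node.next
    return total

assert to_int(sum_of_linked_lists(from_int(1742), from_int(549))) == 2291
```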
bx: int):\n bx = bx * 3\n self.x = self.x + bx\n self.canvas.move(self.shape, bx, 0)\n\n def render(self):\n return\n\n def cleanup(self):\n self.canvas.delete(self.shape)\n\n def fire(self):\n now = RunningValues.unix_time_millis()\n if now > self.time_since_last_fire:\n RunningValues.render_list.append(\n PlayerMissile(self.canvas, self.x + self.width / 2, self.y - self.height / 2))\n self.time_since_last_fire = now + 500\n\n def hit(self, other):\n if str(type(other)).__contains__(\"Invader\"):\n Sound.hit()\n lives = RunningValues.lives_variable.get()\n lives = lives - 1\n RunningValues.lives_variable.set(lives)\n Explosion.create_explosion(self.canvas, self.x + self.width / 2, self.y - self.height / 2)\n RunningValues.player_dead()\n","repo_name":"bernardjason/invaders","sub_path":"PlayerTurret.py","file_name":"PlayerTurret.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5352477829","text":"import preprocessing_func as pf\nimport pickle as pkl\n\n'''\nThis is a test file for the recommendation model\nRemember to change the path to the pickle file if you want to run this file\nChange the input to whatever you want to test\n'''\n\ninput = \"A restaurant with a nice view of the bay\"\n\ndef res_recommend(input, P, Q, userid_vectorizer):\n '''\n Input: A string input that describes the user's preference\n Output: A dataframe of top 5 recommendations\n To ultilize the output, you need to use the index of the dataframe to get the business_id Example: topRecommendations.index\n '''\n \n test_df= pf.pd.DataFrame([input], columns=['text'])\n test_df['text'] = test_df['text'].apply(pf.clean_text)\n test_vectors = userid_vectorizer.transform(test_df['text'])\n test_v_df = pf.pd.DataFrame(test_vectors.toarray(), index=test_df.index, columns=userid_vectorizer.get_feature_names_out())\n\n predictItemRating = pf.pd.DataFrame(pf.np.dot(test_v_df.loc[0],Q.T),index=Q.index,columns=['Rating'])\n topRecommendations = pf.pd.DataFrame.sort_values(predictItemRating,['Rating'],ascending=[0])[:5]\n '''\n This part is to print out the top 5 recommendations\n If you want to print out the top 5 recommendations, uncomment this part\n Please include restDF as an argument in the function to use this part\n \n \n for i in topRecommendations.index:\n if i in restDF['business_id'].values:\n name = restDF[restDF['business_id'] == i]['name'].iloc[0]\n categories = restDF[restDF['business_id'] == i]['categories'].iloc[0]\n stars = str(restDF[restDF['business_id']==i]['stars'].iloc[0])\n rating = topRecommendations.loc[i, 'Rating']\n \n print(f'Name: {name}\\nCategories: {categories}\\nStars: {stars}\\nRecommendation rating: {rating}\\n')\n print('')\n '''\n return topRecommendations\n","repo_name":"billsasi/eatup","sub_path":"Recommendation_model/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38899645455","text":"import requests\nfrom bs4 import BeautifulSoup\nimport smtplib\nimport time\n\n# Amazon product to observe\nURL = 'https://www.amazon.it/gp/product/B01N9VBVN1?pf_rd_p=df2ad00e-5897-4a61-99bf-8e35bfa9a01e&pf_rd_r=NC8X7GGF26JMDVBYAW7Q&th=1'\n# https://www.whatismybrowser.com/detect/what-is-my-user-agent\nheaders = {\"User-Agent\": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}\n\ndef 
checking_price(): \n\tpage = requests.get(URL, headers=headers)\n\tsoup = BeautifulSoup(page.content, 'html.parser')\n\n\t# Finding product name and price\n\ttitle = soup.find(id=\"productTitle\").get_text()\n\tprice = soup.find(id=\"priceblock_ourprice\").get_text()\n\tconverted_price = int(price[0:3])\n\t\n\tprint(title.strip())\n\tprint(converted_price)\n\n\t# Choosing threshold value (e.g. 200€)\n\tif(converted_price < 200): \n\t\tsending_email(converted_price)\n\ndef sending_email(price):\n\tserver = smtplib.SMTP('smtp.gmail.com', 587)\n\tserver.ehlo()\n\tserver.starttls()\n\tserver.ehlo()\n\n\t# https://support.google.com/accounts/answer/185833?hl=en-GB\n\tserver.login('your_email@gmail.com', 'password')\n\n\tsubject = 'Price dropped!'\n\tbody = f\"Price dropped to {price}!\\n\\nCheck amazon link {URL}\"\n\n\tmsg = f\"Subject: {subject}\\n\\n{body}\"\n\n\tserver.sendmail(\n\t\t'from_email@gmail.com',\n\t\t'to_email@gmail.com',\n\t\tmsg\n\t)\n\tprint('EMAIL SENT!')\n\n\tserver.quit()\n\nwhile(True):\n\tchecking_price()\n\ttime.sleep(3600)\n\n\n\n\n\n\n\n\",\"repo_name\":\"alexpi007/amazon_price_scraper\",\"sub_path\":\"amazon_price_observer.py\",\"file_name\":\"amazon_price_observer.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1437,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":0,\"dataset\":\"github-code\",\"pt\":\"72\"}{\"seq_id\":\"14145719204\",\"text\":\"#!/usr/bin/env python\n# coding: utf-8\n\n# # Simulation théorème fondamentaux\n\n# In[2]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport scipy.stats as st\n\n\n# ## Illustration de la loi des grands nombres\n\n# In[3]:\n\n\nn = 10000\nm = 10000\n\nmean = np.pi/2\nsigma = 0.1\n\n\n# On fixe la graine (seed) de manière à pouvoir reproduire les résultats\nnp.random.seed(seed=959)\n\n# Génération d'une matrice (m,n) d'observations indépendantes issues de la loi normale N(mean,sigma^2)\n# Cette matrice peut être vue comme m échantillons de n observations indépendantes\nX = st.norm.rvs(loc = mean, scale = sigma, size = (m,n))\n\n# Pour chaque échantillon (càd chaque ligne de la matrice), on calcule la moyenne cumulative et la moyenne\ncumulative_mean = X.cumsum(axis = 1)/np.arange(1,n+1)\nfinal_mean = X.mean(axis = 1)\n\nepsilon = np.linspace(0,1,m+1)[1:]\n\n\n# In[4]:\n\n\nfig = plt.figure(figsize = (10,6))\nfig.suptitle(r\"Évolution de l'estimation de la moyenne empirique $\\bar{X}_n$\")\nfig.supxlabel(\"Nombre d'observations (n)\")\nfig.supylabel(r\"$\\bar{X}_n$\")\nfor i in range(0,5):\n # Pour plusieurs échantillons, on fait le graphe de l'évolution de la moyenne empirique lorsque la taille de l'échantillon augmente\n plt.plot(np.arange(1,m+1),cumulative_mean[i,:],linewidth = 1)\nplt.show()\n\n\n# In[5]:\n\n\nfig = plt.figure(figsize = (10,6))\nfig.suptitle(r\"Évolution de l'estimation de la probabilité $\\mathbb{P}(| \\bar{X} - \\mu | > \\epsilon)$ en fonction de $\\epsilon$\")\nfig.supxlabel(\"$\\epsilon$\")\nfig.supylabel(r\"$\\hat{\\mathbb{P}}(| \\bar{X} - \\mu | > \\epsilon)$\")\n\n# Estimation de la probabilité (= calcul de proportion) que la différence avec l'espérance soit supérieure à un seuil epsilon\nprob = (abs(final_mean-mean)>epsilon.reshape(epsilon.shape[0],1)).mean(axis=1)\n\nplt.semilogx(epsilon,prob,label = r\"$\\hat{\\mathbb{P}}(| \\bar{X} - \\mu | > \\epsilon)$\")\n\n# Borne donnée par l'inégalité de Tchebychev\nplt.semilogx(epsilon,np.array([min(1,sigma**2/(n*eps**2)) for eps in epsilon]),\n label = r\"Borne de Tchebychev en fonction de $\\epsilon$: 
$\\min\\left(1,\\frac{\\sigma^2}{n\\epsilon^2}\\right)$\")\n\nfig.legend(loc=[0.53,0.76])\nplt.show()\n\n\n# In[6]:\n\n\nfig = plt.figure(figsize = (10,6))\nfig.suptitle(r\"Évolution de l'estimation de la probabilité $\\hat{\\mathbb{P}}(| \\bar{X}_n - \\mu | > 10^{-2})$\")\nfig.supxlabel(\"Nombre d'observations (n)\")\nfig.supylabel(r\"$\\hat{\\mathbb{P}}(| \\bar{X}_n - \\mu | > 10^{-2})$\")\n\n# Estimation de la probabilité (= calcul de proportion) que la déviation par rapport à l'espérance soit supérieure à un seuil fixe\n# pour une taille d'échantillon variable\n\nprob = (abs(cumulative_mean-mean)>1e-2).mean(axis=0)\n\nplt.semilogx(np.arange(1,n+1),prob,label = r\"$\\hat{\\mathbb{P}}(| \\bar{X}_n - \\mu | > 10^{-2})$\")\n\n# Borne donnée par l'inégalité de Tchebychev\nplt.semilogx(np.arange(1,n+1),np.array([min(1,sigma**2/(n_*10**(-4))) for n_ in np.arange(1,n+1)]),\n label = r\"Borne de Tchebychev en fonction de $n$: $\\min\\left(1,\\frac{\\sigma^2}{n\\epsilon^2}\\right)$\")\n\nfig.legend(loc=[0.135,0.56])\nplt.show()\n\n\n# ## Contre-exemple loi des grands nombres\n\n# In[7]:\n\n\nn = 100000\nm = 50\n\nnp.random.seed(seed=959)\n\n# Génération de m échantillons de n observations indépendantes de variables de Cauchy\nX = st.cauchy.rvs(size = (m,n))\n\n# Calcul de la moyenne cumulative pour chaque échantillon\ncumulative_mean = X.cumsum(axis = 1)/np.arange(1,n+1)\n\n\n# Sur le graphique suivant, on peut voir l'évolution de la moyenne empirique d'échantillons d'observations de la loi de Cauchy pour des tailles d'échantillons grandissantes. On remarque que la moyenne empirique ne semble pas converger vers une valeur fixe. Ceci est dû au fait que l'espérance de la loi de Cauchy n'existe pas et, par conséquent, la loi des grands nombres ne s'applique pas.\n\n# In[8]:\n\n\nfig = plt.figure(figsize = (10,6))\nfig.suptitle(\"Évolution de la moyenne empirique d'une variable de Cauchy de m = 50 échantillons\")\nfig.supxlabel(\"Nombre d'observations\")\nfor i in range(0,50):\n # Graphe de l'évolution de la moyenne empirique lorsque le nombre d'observations augmente\n plt.plot(np.arange(1,n+1),cumulative_mean[i,:],linewidth = 1)\nplt.show()\n\n\n# ## Illustration du théorème central limite avec la loi exponentielle\n\n# In[9]:\n\n\nn = 10000\nm = 10000\n\nlambda_ = 0.5\nnp.random.seed(seed=959)\n\n# Génération de variables exponentielles\nX = st.expon.rvs(scale=1/lambda_,size=(m,n))\n\nmean, var = st.expon.stats(scale = 1/lambda_,moments=\"mv\")\n\n# Standardisation des variables exponentielles\nZ = (X-mean)/np.sqrt(var/n)\n\n\n# In[10]:\n\n\nfig, axs = plt.subplots(2,2,figsize = (10,8))\nfig.suptitle(r\"Convergence de la fonction de répartition empirique de $Z_n = \\frac{\\bar{X}_n - \\mu}{\\sigma / \\sqrt{n}}$\")\n\nx = np.linspace(-5,5,1000).reshape(1000,1)\n\n# On trace la fonction de répartition de la moyenne empirique (estimée sur n = 10000 observations) avec m = 10 échantillons\naxs[0,0].plot(x,(Z[:10].mean(axis = 1) 100000):\n self.bal += self.bal*5/100\n print(f\"Your Updated Balance is : {self.bal}\")\n else:\n print(\"Account balance too low\")\n \n\nclass employee(banking):\n def __init__(self, emptype, wtime):\n self.emptype = emptype\n self.wtime = wtime\n self.gsal = 0\n self.bsal = 0\n \n if(self.emptype == \"class1\"):\n self.bsal = 30000\n self.da = self.bsal*121/100\n elif(self.emptype == \"class2\"):\n self.bsal = 20000\n self.da = self.bsal*115/100\n else:\n print(\"Undefined employee type\")\n \n self.hra = self.bsal*30/100 \n \n def calculate(self):\n self.gsal = self.bsal + self.da + self.hra\n print(f\"Your Gross 
salary is : {self.gsal}\")\n \n def update(self):\n if(self.wtime > 15):\n self.gsal += self.gsal*20/100\n print(f\"Your Salary with Bonus is : {self.gsal}\")\n else:\n print(\"You have worked too little\")\n \nc1 = customer(\"saving\", 1000)\nc2 = customer(\"current\", 1000000000)\ne1 = employee(\"class1\", 10)\ne2 = employee(\"class2\", 20)\n\nc1.calculate()\nc2.calculate()\ne1.calculate()\ne2.calculate()\n\nc1.update()\nc2.update()\ne1.update()\ne2.update()\n \",\"repo_name\":\"AbhigyanBafna/2yearsOfLabs\",\"sub_path\":\"python/6_abstracts.py\",\"file_name\":\"6_abstracts.py\",\"file_ext\":\"py\",\"file_size_in_byte\":2689,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":13,\"dataset\":\"github-code\",\"pt\":\"72\"}{\"seq_id\":\"31861198587\",\"text\":\"'''\n\n정석적인 풀이로 풀면 시간 초과가 난다.\n어쩔 수 없이 최악의 경우 O(2^N)이기 때문..\n'''\n\n\nn,k = map(int,input().split(' '))\narr = []\nx = [0 for _ in range(n)]\nfor i in range(n):\n arr.append(list(map(int,input().split(' '))))\n#가성비 좋은 순서대로 정렬\narr.sort(key= lambda x:(x[1]/x[0]),reverse= True)\n\n#arr[cnt]가 들어갈지 말지 결정, 현재 배낭의 크기 size\nMP = 0\n\ndef Knapsack(cnt,size):\n global MP\n #현재 들어가 있는 물건의 가치의 합과 무게의 합\n P = 0\n S = 0\n for i in range(n):\n if x[i] == 1:\n P += arr[i][1]\n S += arr[i][0]\n\n if cnt >= n or size <= 0:\n MP = max(MP,P)\n return\n\n if arr[cnt][0] <= size:\n B = fractional(cnt+1,size-arr[cnt][0])\n if MP < P + arr[cnt][1] + B:\n x[cnt] = 1\n Knapsack(cnt+1,size-arr[cnt][0])\n\n\n B = fractional(cnt+1,size)\n if MP < P + B:\n x[cnt] = 0\n Knapsack(cnt+1,size)\n\ndef fractional(cnt,size):\n B = 0\n S = size\n for i in range(cnt,n):\n if arr[i][0] <= S:\n B += arr[i][1]\n S -= arr[i][0]\n else:\n # take a fraction of the next item to fill the remaining capacity (upper bound)\n B += arr[i][1]*S/arr[i][0]\n break\n\n return B\n\nKnapsack(0,k)\nprint(MP)\n\n\n\",\"repo_name\":\"chanyoung1998/Algorithm-Study\",\"sub_path\":\"백준 문제풀기/파이썬/다이나믹 프로그래밍/algorithm_12865.py\",\"file_name\":\"algorithm_12865.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1391,\"program_lang\":\"python\",\"lang\":\"ko\",\"doc_type\":\"code\",\"stars\":0,\"dataset\":\"github-code\",\"pt\":\"72\"}{\"seq_id\":\"16481287678\",\"text\":\"from networkapi.wagtailpages.factory.libraries.rcc import (\n author_index as author_index_factory,\n)\nfrom networkapi.wagtailpages.factory.libraries.rcc import (\n detail_page as detail_page_factory,\n)\nfrom networkapi.wagtailpages.factory.libraries.rcc import (\n landing_page as landing_page_factory,\n)\nfrom networkapi.wagtailpages.factory.libraries.rcc import (\n library_page as library_page_factory,\n)\nfrom networkapi.wagtailpages.tests import base as test_base\nfrom networkapi.wagtailpages.tests.libraries.rcc import utils\n\n\nclass RCCTestCase(test_base.WagtailpagesTestCase):\n @classmethod\n def setUpTestData(cls):\n super().setUpTestData()\n cls._setup_rcc_structure(homepage=cls.homepage)\n\n @classmethod\n def _setup_rcc_structure(cls, homepage):\n cls.landing_page = landing_page_factory.RCCLandingPageFactory(\n parent=homepage,\n title=\"RCC Playbook\",\n aside_cta__0=\"cta\",\n aside_cta__1=\"cta\",\n )\n cls.library_page = library_page_factory.RCCLibraryPageFactory(\n parent=cls.landing_page,\n )\n cls.author_index = author_index_factory.RCCAuthorsIndexPageFactory(\n parent=cls.landing_page,\n title=\"Browse Authors\",\n )\n\n @staticmethod\n def create_rcc_detail_page_on_parent(*, parent, days_ago=0):\n publication_date = utils.days_ago(n=days_ago)\n return detail_page_factory.RCCDetailPageFactory(\n parent=parent,\n original_publication_date=publication_date,\n )\n\n @staticmethod\n def 
make_page_private(page):\n utils.make_page_private(page)\n\n def create_rcc_detail_page(self, days_ago=0):\n return self.create_rcc_detail_page_on_parent(\n parent=self.library_page,\n days_ago=days_ago,\n )\n\n def setUp(self):\n self.synchronize_tree()\n\",\"repo_name\":\"MozillaFoundation/foundation.mozilla.org\",\"sub_path\":\"network-api/networkapi/wagtailpages/tests/libraries/rcc/base.py\",\"file_name\":\"base.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1886,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":344,\"dataset\":\"github-code\",\"pt\":\"72\"}{\"seq_id\":\"29393298440\",\"text\":\"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys,os\nimport argparse\nimport subprocess\n\n\nprint(\"Easy Fasta Splitter V2.0\")\nprint(\"By: Patrick Gagne (patg97@hotmail.com)\")\n\n# Changelog\n# 1. Remaining sequences are now divided between the containers instead of going to the last one\n# 2. Main writing loop now uses the file_index to determine the current container maximum size\n\nparser=argparse.ArgumentParser(description='Split fasta into multiple files')\n\nparser.add_argument(\"-f\",\"--fas\", dest=\"fasta_file\", required=True, help=(\"Fasta file to split [REQUIRED]\"))\nparser.add_argument(\"-s\",\"--split\", dest=\"split_level\", required=True,type=int, help=(\"Number of files to generate [REQUIRED]\"))\n\nargs=parser.parse_args()\n\nif args.split_level <= 1:\n print(\"ERROR: Split level must be greater than 1\")\n sys.exit(1)\n\nprint(\"Counting sequences in %s\"%(args.fasta_file))\npos_err=0\n\n# Using POSIX commands to count sequences faster (native unix commands grep and wc)\nif os.name == \"posix\":\n print(\"POSIX system detected, POSIX commands will be used to count sequences faster\")\n try:\n count=int(subprocess.check_output(\"grep '>' %s | wc -l\"%(args.fasta_file), shell=True).split()[0])\n except:\n pos_err=1\n print(\"Unexpected error on the posix command, reverting to standard python count\")\n\n# In case the system is non-POSIX (Windows for example) or if the grep / wc command fails, use python commands (slower but safe)\nif os.name != \"posix\" or pos_err==1:\n if pos_err == 0:\n print(\"Non POSIX system detected, Python will be used to count\")\n count=0\n try:\n with open(args.fasta_file,\"r\") as infile:\n for line in infile:\n if line[0] == \">\":\n count+=1\n except IOError:\n print(\"ERROR: Fasta file %s not found or cannot be opened\"%(args.fasta_file))\n sys.exit(1)\n\nif args.split_level > count:\n print(\"ERROR: Split level (%i) greater than sequence count (%i)\"%(args.split_level,count))\n print(\"Please reduce split level\")\n sys.exit(1)\n\n\nprint(\"\\nSequence count: %i\"%(count))\n\n# Calculating the base size for each container and the number of remaining sequences to distribute\ncontainer_size=int(count/args.split_level)\ncontainer_mod=count%args.split_level\n\n# Creating the list of sizes and splitting the remaining sequences between them (+1 sequence on some containers)\nsize_list=[container_size]*args.split_level\nfor i in range(container_mod):\n size_list[i]+=1\n\n\n# If the count is a multiple of the split level, each container will be the same size\nif container_mod > 0:\n print(\"Some files will contain %i sequences and some will contain %i sequences\"%(container_size,container_size+1))\nelse:\n print(\"Each file will contain %i sequences\\n\"%(container_size))\n\n\n\n\n# Open a series of files which will contain the sequences and use a list to switch between them\nfile_list=[]\nfor i in range(1,args.split_level+1):\n 
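# All output handles are opened up front and kept in a list so the writer loop can switch containers by index (assumes the split level stays below the OS open-file limit).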
\n file_list.append(open(args.fasta_file+\".\"+str(i),\"w\"))\n\nseqnum=0\nfile_ind=0\nwith open(args.fasta_file,\"r\") as infile:\n for line in infile:\n # Case for the first sequence of the file (one time only)\n if seqnum == 0:\n print(\"Generating \"+args.fasta_file+\".\"+str(file_ind+1))\n file_list[file_ind].write(line)\n seqnum+=1\n continue\n if line[0] == \">\":\n # When you reach the current container size, switch to the next using the list index\n if seqnum+1>size_list[file_ind]:\n file_ind+=1\n seqnum=0\n print(\"Generating \"+args.fasta_file+\".\"+str(file_ind+1))\n \n file_list[file_ind].write(line)\n seqnum+=1\n # For DNA lines, nothing to be done except dump them in the current file\n else:\n file_list[file_ind].write(line)\n \n# Close all files in the list\nprint(\"Closing files\")\nfor i in file_list:\n i.close()\n\nprint(\"\\nFasta Splitting DONE\\n\")\n\n\",\"repo_name\":\"Patg13/Fasta_EasySplit\",\"sub_path\":\"Fasta_EasySplit.py\",\"file_name\":\"Fasta_EasySplit.py\",\"file_ext\":\"py\",\"file_size_in_byte\":3956,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":1,\"dataset\":\"github-code\",\"pt\":\"72\"}{\"seq_id\":\"42015800545\",\"text\":\"from typing import Dict, List, Optional, Tuple, Union, overload # noqa\n\nimport bionty as bt\nimport numpy as np\nfrom django.db import models\nfrom lamin_utils import logger\nfrom lnschema_core.models import CanValidate, HasParents, Registry, User\nfrom lnschema_core.users import current_user_id\n\nfrom . import ids\nfrom ._bionty import create_or_get_organism_record, encode_uid, lookup2kwargs\n\n\nclass BioRegistry(Registry, HasParents, CanValidate):\n \"\"\"Base Registry of lnschema_bionty.\n\n BioRegistry inherits all methods from :class:`~lamindb.dev.Registry` and provides additional methods\n including :meth:`~lnschema_bionty.dev.BioRegistry.bionty` and :meth:`~lnschema_bionty.dev.BioRegistry.from_bionty`\n\n Notes:\n For more info, see tutorials:\n\n - :doc:`/lnschema-bionty`\n - :doc:`bio-registries`\n \"\"\"\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n # DB-facing constructor\n if len(args) == len(self._meta.concrete_fields):\n super(BioRegistry, self).__init__(*args, **kwargs)\n return None\n\n # passing lookup result from bionty, which is a Tuple or List\n if args and len(args) == 1 and isinstance(args[0], (Tuple, List)) and len(args[0]) > 0:\n if isinstance(args[0], List) and len(args[0]) > 1:\n logger.warning(\"multiple lookup/search results are passed, only returning record from the first entry\")\n result = lookup2kwargs(self, *args, **kwargs) # type:ignore\n # exclude \"parents\" from query arguments\n query_kwargs = {k: v for k, v in result.items() if k != \"parents\"}\n existing_record = self.filter(**query_kwargs).one_or_none()\n if existing_record is not None:\n from lamindb._registry import init_self_from_db\n\n init_self_from_db(self, existing_record)\n return None\n else:\n kwargs = result # result already has encoded id\n args = ()\n # all other cases require encoding the id\n else:\n kwargs = encode_uid(orm=self, kwargs=kwargs)\n\n # raise error if no organism is passed\n if hasattr(self.__class__, \"organism_id\"):\n if kwargs.get(\"organism\") is None and kwargs.get(\"organism_id\") is None:\n import lnschema_bionty as lb\n\n if lb.settings.organism is not None:\n kwargs[\"organism\"] = lb.settings.organism\n else:\n raise RuntimeError(\"please pass an organism!\")\n elif kwargs.get(\"organism\") is not None:\n if not isinstance(kwargs.get(\"organism\"), Organism):\n raise TypeError(\"organism 
must be a `lnschema_bionty.Organism` record\")\n\n # now continue with the user-facing constructor\n # set the direct parents as a private attribute\n # this is a list of strings that store the ontology id\n if \"parents\" in kwargs:\n parents = kwargs.pop(\"parents\")\n # this checks if we receive a np.ndarray from pandas\n if isinstance(parents, (list, np.ndarray)) and len(parents) > 0:\n if not isinstance(parents[0], str):\n raise ValueError(\"not a valid parents kwarg, got to be list of ontology ids\")\n self._parents = parents\n\n super().__init__(*args, **kwargs)\n\n @classmethod\n def bionty(\n cls,\n organism: Optional[Union[str, Registry]] = None,\n bionty_source: Optional[\"BiontySource\"] = None,\n **kwargs,\n ) -> \"bt.Bionty\":\n \"\"\"The corresponding Bionty object.\n\n e.g. lnschema_bionty.CellType.bionty() is equivalent to bionty.CellType().\n Note that the public source is auto-configured and tracked via :meth:`lnschema_bionty.BiontySource`.\n\n See Also:\n `Bionty `__\n\n Examples:\n >>> celltype_bt = lb.CellType.bionty()\n >>> celltype_bt\n CellType\n Organism: all\n Source: cl, 2023-04-20\n #terms: 2698\n ...\n 📖 CellType.df(): ontology reference table\n 🔎 CellType.lookup(): autocompletion of terms\n 🎯 CellType.search(): free text search of terms\n 🧐 CellType.inspect(): check if identifiers are mappable\n 👽 CellType.standardize(): map synonyms to standardized names\n 🔗 CellType.ontology: Pronto.Ontology object\n \"\"\"\n if cls.__module__.startswith(\"lnschema_bionty.\"):\n if organism is None and kwargs.get(\"species\") is not None:\n organism = kwargs.get(\"species\")\n organism_record = create_or_get_organism_record(organism=organism, orm=cls)\n\n if bionty_source is not None:\n organism = bionty_source.organism\n source = bionty_source.source\n version = bionty_source.version\n else:\n organism = organism_record.name if organism_record is not None else None\n source = None\n version = None\n bionty_object = getattr(bt, cls.__name__)(organism=organism, source=source, version=version)\n\n return bionty_object\n\n @classmethod\n def from_bionty(cls, **kwargs) -> Optional[Union[\"BioRegistry\", List[\"BioRegistry\"]]]:\n \"\"\"Create a record or records from bionty based on a single field value.\n\n Notes:\n For more info, see tutorial :doc:`/lnschema-bionty`\n\n Bulk create protein records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n Create a record by passing a field value:\n\n >>> record = lb.Gene.from_bionty(symbol=\"TCF7\", organism=\"human\")\n\n Create a record from non-default source:\n\n >>> bionty_source = lb.BiontySource.filter(entity=\"CellType\", source=\"cl\", version=\"2022-08-16\").one() # noqa\n >>> record = lb.CellType.from_bionty(name=\"T cell\", bionty_source=bionty_source)\n\n \"\"\"\n # non-relationship kwargs\n kv = {k: v for k, v in kwargs.items() if k not in [i.name for i in cls._meta.fields if i.is_relation]}\n if len(kv) > 1:\n raise AssertionError(\"Only one field can be passed to generate record from Bionty\")\n elif len(kv) == 0:\n return None\n else:\n k = next(iter(kv))\n v = kwargs.pop(k)\n results = cls.from_values([v], field=getattr(cls, k), **kwargs)\n if len(results) == 1:\n return results[0]\n elif len(results) == 0:\n return None\n else:\n return results\n\n def _save_ontology_parents(self, mute: bool = False): # saving records of parents\n if hasattr(self, \"_parents\"):\n import lamindb as ln\n\n parents = self._parents\n # here parents is still a list of ontology ids\n logger.info(f\"also saving parents of 
{self}\")\n kwargs: Dict = {}\n if hasattr(self, \"bionty_source\"):\n kwargs = {\"bionty_source\": self.bionty_source}\n parents_records = self.from_values(parents, self.__class__.ontology_id, **kwargs)\n ln.save(parents_records, mute=mute)\n self.parents.set(parents_records)\n\n def save(self, parents: Optional[bool] = None, *args, **kwargs) -> None:\n \"\"\"Save the record and its parents recursively.\n\n Args:\n parents: `bool = True`. Whether to save parents records.\n \"\"\"\n # save the record first without parents\n super(BioRegistry, self).save(*args, **kwargs)\n from .dev._settings import settings\n\n if parents is None:\n parents = settings.auto_save_parents\n\n if parents:\n self._save_ontology_parents()\n\n\nclass Organism(BioRegistry):\n \"\"\"Organism - `NCBI Taxonomy `__, `Ensembl Organism `__.\n\n Examples:\n >>> record = lb.Organism.from_bionty(name=\"rabbit\")\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n name = models.CharField(max_length=64, db_index=True, default=None, unique=True)\n \"\"\"Name of a organism, required field.\"\"\"\n ontology_id = models.CharField(max_length=32, unique=True, db_index=True, null=True, default=None)\n \"\"\"NCBI Taxon ID.\"\"\"\n scientific_name = models.CharField(max_length=64, db_index=True, unique=True, null=True, default=None)\n \"\"\"Scientific name of a organism.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent organism records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True)\n \"\"\":class:`~lnschema_bionty.BiontySource` this record associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"organism\")\n \"\"\"Files linked to the organism.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"organism\")\n \"\"\"Datasets linked to the organism.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(User, models.PROTECT, default=current_user_id, related_name=\"created_organism\")\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n @overload\n def __init__(\n self,\n name: str,\n taxon_id: Optional[str],\n scientific_name: Optional[str],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(Organism, self).__init__(*args, **kwargs)\n\n\nclass Gene(BioRegistry):\n \"\"\"Genes - `Ensembl `__, `NCBI Gene `__.\n\n Notes:\n Bulk create Gene records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.Gene.from_bionty(symbol=\"TCF7\", organism=\"human\")\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=12, default=ids.gene)\n \"\"\"A universal id (hash of selected field).\"\"\"\n symbol = models.CharField(max_length=64, db_index=True, null=True, default=None)\n \"\"\"A unique short form of gene name.\"\"\"\n stable_id = models.CharField(max_length=64, db_index=True, null=True, default=None, unique=True)\n \"\"\"Stable ID of a gene that doesn't have ensembl_gene_id, e.g. 
a yeast gene.\"\"\"\n ensembl_gene_id = models.CharField(max_length=64, db_index=True, null=True, default=None, unique=True)\n \"\"\"Ensembl gene stable ID, in the form ENS[organism prefix][feature type prefix][a unique eleven digit number].\"\"\"\n ncbi_gene_ids = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) NCBI Gene IDs that correspond to this Ensembl Gene ID.\n NCBI Gene ID, also known as Entrez Gene ID, in the form of numeric string, 1 to 9 digits.\n \"\"\"\n biotype = models.CharField(max_length=64, db_index=True, null=True, default=None)\n \"\"\"Type of the gene.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the gene.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this gene.\"\"\"\n organism = models.ForeignKey(Organism, models.PROTECT, default=None, related_name=\"genes\")\n \"\"\":class:`~lnschema_bionty.Organism` this gene associates with.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"genes\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this gene associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"genes\")\n \"\"\"Files linked to the gene.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"genes\")\n \"\"\"Datasets linked to the gene.\"\"\"\n feature_sets = models.ManyToManyField(\"lnschema_core.FeatureSet\", related_name=\"genes\")\n \"\"\"Featuresets linked to this gene.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(User, models.PROTECT, default=current_user_id, related_name=\"created_genes\")\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n @overload\n def __init__(\n self,\n symbol: Optional[str],\n stable_id: Optional[str],\n ensembl_gene_id: Optional[str],\n ncbi_gene_ids: Optional[str],\n biotype: Optional[str],\n description: Optional[str],\n synonyms: Optional[str],\n organism: Optional[Organism],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(Gene, self).__init__(*args, **kwargs)\n\n\nclass Protein(BioRegistry):\n \"\"\"Proteins - `Uniprot `__.\n\n Notes:\n Bulk create Protein records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.Protein.from_bionty(name=\"Synaptotagmin-15B\", organism=\"human\")\n >>> record = lb.Protein.from_bionty(gene_symbol=\"SYT15B\", organism=\"human\")\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=12, default=ids.protein)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=64, db_index=True, null=True, default=None)\n \"\"\"Unique name of a protein.\"\"\"\n uniprotkb_id = models.CharField(max_length=10, db_index=True, null=True, default=None, unique=True)\n \"\"\"UniProt protein ID, 6 alphanumeric characters, possibly suffixed by 4 more.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this protein.\"\"\"\n length = models.BigIntegerField(db_index=True, null=True)\n \"\"\"Length of 
the protein sequence.\"\"\"\n gene_symbol = models.CharField(max_length=64, db_index=True, null=True, default=None)\n \"\"\"The primary gene symbol corresponds to this protein.\"\"\"\n ensembl_gene_ids = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) Ensembl Gene IDs that correspond to this protein.\"\"\"\n organism = models.ForeignKey(Organism, models.PROTECT, default=None, related_name=\"proteins\")\n \"\"\":class:`~lnschema_bionty.Organism` this protein associates with.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"proteins\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this protein associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"proteins\")\n \"\"\"Files linked to the protein.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"proteins\")\n \"\"\"Datasets linked to the protein.\"\"\"\n feature_sets = models.ManyToManyField(\"lnschema_core.FeatureSet\", related_name=\"proteins\")\n \"\"\"Featuresets linked to this protein.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_proteins\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n @overload\n def __init__(\n self,\n name: Optional[str],\n uniprotkb_id: Optional[str],\n synonyms: Optional[str],\n length: Optional[int],\n gene_symbol: Optional[str],\n ensembl_gene_ids: Optional[str],\n organism: Optional[Organism],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(Protein, self).__init__(*args, **kwargs)\n\n\nclass CellMarker(BioRegistry):\n \"\"\"Cell markers - `CellMarker `__.\n\n Notes:\n Bulk create CellMarker records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.CellMarker.from_bionty(name=\"PD1\", organism=\"human\")\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=12, default=ids.cellmarker)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=64, db_index=True, default=None, unique=True)\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this cell marker.\"\"\"\n gene_symbol = models.CharField(max_length=64, db_index=True, null=True, default=None)\n \"\"\"Gene symbol that corresponds to this cell marker.\"\"\"\n \"\"\"Unique name of the cell marker.\"\"\"\n ncbi_gene_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"NCBI gene id that corresponds to this cell marker.\"\"\"\n uniprotkb_id = models.CharField(max_length=10, db_index=True, null=True, default=None)\n \"\"\"Uniprotkb id that corresponds to this cell marker.\"\"\"\n organism = models.ForeignKey(Organism, models.PROTECT, default=None, related_name=\"cell_markers\")\n \"\"\":class:`~lnschema_bionty.Organism` this cell marker associates with.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"cell_markers\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this cell 
marker associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"cell_markers\")\n \"\"\"Files linked to the cell marker.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"cell_markers\")\n \"\"\"Datasets linked to the cell marker.\"\"\"\n feature_sets = models.ManyToManyField(\"lnschema_core.FeatureSet\", related_name=\"cell_markers\")\n \"\"\"Featuresets linked to this cell marker.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_cell_markers\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n @overload\n def __init__(\n self,\n name: str,\n synonyms: Optional[str],\n gene_symbol: Optional[str],\n ncbi_gene_id: Optional[str],\n uniprotkb_id: Optional[str],\n organism: Optional[Organism],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(CellMarker, self).__init__(*args, **kwargs)\n\n\nclass Tissue(BioRegistry):\n \"\"\"Tissues - `Uberon `__.\n\n Notes:\n For more info, see tutorial :doc:`bio-registries`\n\n Bulk create Tissue records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.Tissue.from_bionty(name=\"brain\")\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=256, db_index=True)\n \"\"\"Name of the tissue.\"\"\"\n ontology_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"Ontology ID of the tissue.\"\"\"\n abbr = models.CharField(max_length=32, db_index=True, unique=True, null=True, default=None)\n \"\"\"A unique abbreviation of tissue.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this tissue.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the tissue.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent tissues records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"tissues\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this tissue associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"tissues\")\n \"\"\"Files linked to the tissue.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"tissues\")\n \"\"\"Datasets linked to the tissue.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(User, models.PROTECT, default=current_user_id, related_name=\"created_tissues\")\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"name\", \"ontology_id\"),)\n\n @overload\n def __init__(\n self,\n name: str,\n ontology_id: Optional[str],\n abbr: Optional[str],\n 
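# bar-separated (|) string of synonyms, per the field docstring above\n 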
synonyms: Optional[str],\n description: Optional[str],\n parents: List[\"Tissue\"],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(Tissue, self).__init__(*args, **kwargs)\n\n\nclass CellType(BioRegistry):\n \"\"\"Cell types - `Cell Ontology `__.\n\n Notes:\n For more info, see tutorial :doc:`bio-registries`\n\n Bulk create CellType records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.CellType.from_bionty(name=\"T cell\")\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=256, db_index=True)\n \"\"\"Name of the cell type.\"\"\"\n ontology_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"Ontology ID of the cell type.\"\"\"\n abbr = models.CharField(max_length=32, db_index=True, unique=True, null=True, default=None)\n \"\"\"A unique abbreviation of cell type.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this cell type.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the cell type.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent cell type records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"cell_types\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this cell type associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"cell_types\")\n \"\"\"Files linked to the cell type.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"cell_types\")\n \"\"\"Datasets linked to the cell type.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_cell_types\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"name\", \"ontology_id\"),)\n\n @overload\n def __init__(\n self,\n name: str,\n ontology_id: Optional[str],\n abbr: Optional[str],\n synonyms: Optional[str],\n description: Optional[str],\n parents: List[\"CellType\"],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(CellType, self).__init__(*args, **kwargs)\n\n\nclass Disease(BioRegistry):\n \"\"\"Diseases - `Mondo `__, `Human Disease `__.\n\n Notes:\n For more info, see tutorial :doc:`bio-registries`\n\n Bulk create Disease records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.Disease.from_bionty(name=\"Alzheimer disease\")\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=256, db_index=True)\n \"\"\"Name of the 
disease.\"\"\"\n ontology_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"Ontology ID of the disease.\"\"\"\n abbr = models.CharField(max_length=32, db_index=True, unique=True, null=True, default=None)\n \"\"\"A unique abbreviation of disease.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this disease.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the disease.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent disease records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"diseases\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this disease associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"diseases\")\n \"\"\"Files linked to the disease.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"diseases\")\n \"\"\"Datasets linked to the disease.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_diseases\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"name\", \"ontology_id\"),)\n\n @overload\n def __init__(\n self,\n name: str,\n ontology_id: Optional[str],\n abbr: Optional[str],\n synonyms: Optional[str],\n description: Optional[str],\n parents: List[\"Disease\"],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(Disease, self).__init__(*args, **kwargs)\n\n\nclass CellLine(BioRegistry):\n \"\"\"Cell lines - `Cell Line Ontology `__.\n\n Notes:\n For more info, see tutorial :doc:`bio-registries`\n\n Bulk create CellLine records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> standard_name = lb.CellLine.bionty().standardize([\"K562\"])[0]\n >>> record = lb.CellLine.from_bionty(name=standard_name)\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=256, db_index=True)\n \"\"\"Name of the cell line.\"\"\"\n ontology_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"Ontology ID of the cell line.\"\"\"\n abbr = models.CharField(max_length=32, db_index=True, unique=True, null=True, default=None)\n \"\"\"A unique abbreviation of cell line.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this cell line.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the cell line.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent cell line records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"cell_lines\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this cell line associates with.\"\"\"\n files = 
models.ManyToManyField(\"lnschema_core.File\", related_name=\"cell_lines\")\n \"\"\"Files linked to the cell line.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"cell_lines\")\n \"\"\"Datasets linked to the cell line.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_cell_lines\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"name\", \"ontology_id\"),)\n\n @overload\n def __init__(\n self,\n name: str,\n ontology_id: Optional[str],\n abbr: Optional[str],\n synonyms: Optional[str],\n description: Optional[str],\n parents: List[\"CellLine\"],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(CellLine, self).__init__(*args, **kwargs)\n\n\nclass Phenotype(BioRegistry):\n \"\"\"Phenotypes - `Human Phenotype `__,\n `Phecodes `__,\n `Mammalian Phenotype `__,\n `Zebrafish Phenotype `__.\n\n Notes:\n For more info, see tutorial :doc:`bio-registries`\n\n Bulk create Phenotype records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.Phenotype.from_bionty(name=\"Arachnodactyly\")\n >>> record.save()\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=256, db_index=True)\n \"\"\"Name of the phenotype.\"\"\"\n ontology_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"Ontology ID of the phenotype.\"\"\"\n abbr = models.CharField(max_length=32, db_index=True, unique=True, null=True, default=None)\n \"\"\"A unique abbreviation of phenotype.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this phenotype.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the phenotype.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent phenotype records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"phenotypes\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this phenotype associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"phenotypes\")\n \"\"\"Files linked to the phenotype.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"phenotypes\")\n \"\"\"Datasets linked to the phenotype.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_phenotypes\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"name\", \"ontology_id\"),)\n\n @overload\n def __init__(\n self,\n name: str,\n ontology_id: Optional[str],\n abbr: 
Optional[str],\n synonyms: Optional[str],\n description: Optional[str],\n parents: List[\"Phenotype\"],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(Phenotype, self).__init__(*args, **kwargs)\n\n\nclass Pathway(BioRegistry):\n \"\"\"Pathways - `Gene Ontology `__,\n `Pathway Ontology `__.\n\n Notes:\n For more info, see tutorial :doc:`bio-registries`\n\n Bulk create Pathway records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.Pathway.from_bionty(ontology_id=\"GO:1903353\")\n >>> record.save()\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=256, db_index=True)\n \"\"\"Name of the pathway.\"\"\"\n ontology_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"Ontology ID of the pathway.\"\"\"\n abbr = models.CharField(max_length=32, db_index=True, unique=True, null=True, default=None)\n \"\"\"A unique abbreviation of pathway.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this pathway.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the pathway.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent pathway records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"pathways\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this pathway associates with.\"\"\"\n genes = models.ManyToManyField(\"Gene\", related_name=\"pathways\")\n \"\"\"Genes that signifies the pathway.\"\"\"\n feature_sets = models.ManyToManyField(\"lnschema_core.FeatureSet\", related_name=\"pathways\")\n \"\"\"Featuresets linked to the pathway.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"pathways\")\n \"\"\"Files linked to the pathway.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"pathways\")\n \"\"\"Datasets linked to the pathway.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_pathways\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"name\", \"ontology_id\"),)\n\n @overload\n def __init__(\n self,\n name: str,\n ontology_id: Optional[str],\n abbr: Optional[str],\n synonyms: Optional[str],\n description: Optional[str],\n parents: List[\"Pathway\"],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(Pathway, self).__init__(*args, **kwargs)\n\n\nclass ExperimentalFactor(BioRegistry):\n \"\"\"Experimental factors - `Experimental Factor Ontology `__.\n\n\n Notes:\n For more info, see tutorial :doc:`bio-registries`\n\n Bulk create ExperimentalFactor records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> standard_name = 
lb.ExperimentalFactor.bionty().standardize([\"scRNA-seq\"])\n >>> record = lb.ExperimentalFactor.from_bionty(name=standard_name)\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=256, db_index=True)\n \"\"\"Name of the experimental factor.\"\"\"\n ontology_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"Ontology ID of the experimental factor.\"\"\"\n abbr = models.CharField(max_length=32, db_index=True, unique=True, null=True, default=None)\n \"\"\"A unique abbreviation of experimental factor.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this experimental factor.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the experimental factor.\"\"\"\n molecule = models.TextField(null=True, default=None, db_index=True)\n \"\"\"Molecular experimental factor, parsed from EFO.\"\"\"\n instrument = models.TextField(null=True, default=None, db_index=True)\n \"\"\"Instrument used to measure the experimental factor, parsed from EFO.\"\"\"\n measurement = models.TextField(null=True, default=None, db_index=True)\n \"\"\"Phenotypic experimental factor, parsed from EFO.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent experimental factor records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"experimental_factors\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this experimental_factors associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"experimental_factors\")\n \"\"\"Files linked to the experimental_factors.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"experimental_factors\")\n \"\"\"Datasets linked to the experimental factor.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_experimental_factors\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"name\", \"ontology_id\"),)\n\n @overload\n def __init__(\n self,\n name: str,\n ontology_id: Optional[str],\n abbr: Optional[str],\n synonyms: Optional[str],\n description: Optional[str],\n parents: List[\"ExperimentalFactor\"],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(ExperimentalFactor, self).__init__(*args, **kwargs)\n\n\nclass DevelopmentalStage(BioRegistry):\n \"\"\"Developmental stages - `Human Developmental Stages `__,\n `Mouse Developmental Stages `__. 
# noqa\n\n Notes:\n For more info, see tutorial :doc:`bio-registries`\n\n Bulk create DevelopmentalStage records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.DevelopmentalStage.from_bionty(name=\"neurula stage\")\n >>> record.save()\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=256, db_index=True)\n \"\"\"Name of the developmental stage.\"\"\"\n ontology_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"Ontology ID of the developmental stage.\"\"\"\n abbr = models.CharField(max_length=32, db_index=True, unique=True, null=True, default=None)\n \"\"\"A unique abbreviation of developmental stage.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this developmental stage.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the developmental stage.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent developmental stage records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"developmental_stages\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this developmental stage associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"developmental_stages\")\n \"\"\"Files linked to the developmental stage.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"developmental_stages\")\n \"\"\"Datasets linked to the developmental stage.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_developmental_stages\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"name\", \"ontology_id\"),)\n\n @overload\n def __init__(\n self,\n name: str,\n ontology_id: Optional[str],\n abbr: Optional[str],\n synonyms: Optional[str],\n description: Optional[str],\n parents: List[\"DevelopmentalStage\"],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(DevelopmentalStage, self).__init__(*args, **kwargs)\n\n\nclass Ethnicity(BioRegistry):\n \"\"\"Ethnicity - `Human Ancestry Ontology `__.\n\n Notes:\n For more info, see tutorial :doc:`bio-registries`\n\n Bulk create Ethnicity records via :class:`~lamindb.dev.Registry.from_values`.\n\n Examples:\n >>> record = lb.Ethnicity.from_bionty(name=\"European\")\n >>> record.save()\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.ontology)\n \"\"\"A universal id (hash of selected field).\"\"\"\n name = models.CharField(max_length=256, db_index=True)\n \"\"\"Name of the ethnicity.\"\"\"\n ontology_id = models.CharField(max_length=32, db_index=True, null=True, default=None)\n \"\"\"Ontology ID of the ethnicity.\"\"\"\n abbr = 
models.CharField(max_length=32, db_index=True, unique=True, null=True, default=None)\n \"\"\"A unique abbreviation of ethnicity.\"\"\"\n synonyms = models.TextField(null=True, default=None)\n \"\"\"Bar-separated (|) synonyms that correspond to this ethnicity.\"\"\"\n description = models.TextField(null=True, default=None)\n \"\"\"Description of the ethnicity.\"\"\"\n parents = models.ManyToManyField(\"self\", symmetrical=False, related_name=\"children\")\n \"\"\"Parent ethnicity records.\"\"\"\n bionty_source = models.ForeignKey(\"BiontySource\", models.PROTECT, null=True, related_name=\"ethnicities\")\n \"\"\":class:`~lnschema_bionty.BiontySource` this ethnicity associates with.\"\"\"\n files = models.ManyToManyField(\"lnschema_core.File\", related_name=\"ethnicities\")\n \"\"\"Files linked to the ethnicity.\"\"\"\n datasets = models.ManyToManyField(\"lnschema_core.Dataset\", related_name=\"ethnicities\")\n \"\"\"Datasets linked to the ethnicity.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_ethnicities\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"name\", \"ontology_id\"),)\n\n @overload\n def __init__(\n self,\n name: str,\n ontology_id: Optional[str],\n abbr: Optional[str],\n synonyms: Optional[str],\n description: Optional[str],\n parents: List[\"Ethnicity\"],\n bionty_source: Optional[\"BiontySource\"],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(Ethnicity, self).__init__(*args, **kwargs)\n\n\nclass BiontySource(Registry):\n \"\"\"Versions of public ontologies.\n\n .. 
warning::\n\n Do not modify the records unless you know what you are doing!\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n \"\"\"Internal id, valid only in one DB instance.\"\"\"\n uid = models.CharField(unique=True, max_length=8, default=ids.biontysource)\n \"\"\"A universal id (hash of selected field).\"\"\"\n entity = models.CharField(max_length=64, db_index=True)\n \"\"\"Entity class name.\"\"\"\n organism = models.CharField(max_length=64, db_index=True)\n \"\"\"Organism name; use 'all' if unknown or not applicable.\"\"\"\n currently_used = models.BooleanField(default=False, db_index=True)\n \"\"\"Whether this record is currently used.\"\"\"\n source = models.CharField(max_length=64, db_index=True)\n \"\"\"Source key, short form, CURIE prefix for ontologies.\"\"\"\n source_name = models.TextField(blank=True, db_index=True)\n \"\"\"Source full name, long form.\"\"\"\n version = models.CharField(max_length=64, db_index=True)\n \"\"\"Version of the source.\"\"\"\n url = models.TextField(null=True, default=None)\n \"\"\"URL of the source file.\"\"\"\n md5 = models.TextField(null=True, default=None)\n \"\"\"MD5 hash of the source file.\"\"\"\n source_website = models.TextField(null=True, default=None)\n \"\"\"Website of the source.\"\"\"\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n \"\"\"Time of creation of record.\"\"\"\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n \"\"\"Time of last update to record.\"\"\"\n created_by = models.ForeignKey(\n User,\n models.PROTECT,\n default=current_user_id,\n related_name=\"created_bionty_sources\",\n )\n \"\"\"Creator of record, a :class:`~lamindb.User`.\"\"\"\n\n class Meta:\n unique_together = ((\"entity\", \"source\", \"organism\", \"version\"),)\n\n @overload\n def __init__(\n self,\n entity: str,\n organism: str,\n currently_used: bool,\n source: str,\n version: str,\n source_name: Optional[str],\n url: Optional[str],\n md5: Optional[str],\n source_website: Optional[str],\n ):\n ...\n\n @overload\n def __init__(\n self,\n *db_args,\n ):\n ...\n\n def __init__(\n self,\n *args,\n **kwargs,\n ):\n kwargs = encode_uid(orm=self, kwargs=kwargs)\n super(BiontySource, self).__init__(*args, **kwargs)\n\n\n# backward compat\nSpecies = Organism\n","repo_name":"laminlabs/lnschema-bionty","sub_path":"lnschema_bionty/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":48641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"38424717887","text":"import glob\nimport os\n\n# print(os.getcwd())\nfor fpath in glob.glob('u_ex*.log'):\n # print(f)\n print('start to read contents of ' + fpath)\n with open(fpath) as f:\n lines = f.readlines()\n print('{0} has {1} lines'.format(fpath, len(lines)))\n service_calls = [l for l in lines if '/service' in l]\n print('{0} has {1} service calls'.format(fpath, len(service_calls)))\n\n with open('parsed_' + fpath, 'w') as fwrite:\n fwrite.writelines(service_calls)","repo_name":"anderscui/ml","sub_path":"py/data_analysis/iis/iis_parser.py","file_name":"iis_parser.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30951852067","text":"import unittest\nfrom database import dbProxy\nfrom pymongo import MongoClient\nimport logicParser\nimport tifffile\n\n\nclass MyTestCase(unittest.TestCase):\n def test_something(self):\n #self.collectionTifProvider.find_one({})\n zr1 = 
dbProxy.getZeroFilmData4ExactLotNo(self.collectionTifProvider, 'Co-60 (MRRC)',\n '05062003', 24)\n print(zr1)\n self.assertEqual(zr1['dose'], 0.0)\n\n def test_build2d(self):\n fl = r'V:\\!Установки\\EBT3\\Без верификация фантом головы\\P_bv2_150dpi127.tif'\n im = tifffile.imread(fl)\n reds = im[0:im.shape[0], 0:im.shape[1], 0]\n reds1d = reds.flatten()\n print(reds)\n import matplotlib.pyplot as plt\n im2 = plt.imread(fl)\n plt.imshow(reds, cmap=plt.cm.jet)\n plt.show()\n self.assertEqual(True, True)\n\n def test_build2d_od(self):\n fl = r'V:\\!Установки\\EBT3\\Без верификация фантом головы\\P_bv2_150dpi127.tif'\n im = tifffile.imread(fl)\n reds = im[0:im.shape[0], 0:im.shape[1], 0]\n reds1d = reds.flatten()\n print(reds)\n\n # vl = dbProxy.getData4CalibrationCurveWithDoseHighLimit(self.collectionTifProvider, 'Co-60 (MRRC)',\n # '05062003', 24, 50.0)\n vd = dbProxy.getDict4ExactCurveWithDoseLimit(self.collectionTifProvider, 'Co-60 (MRRC)',\n '05062003', 24, 50.0)\n\n obj1 = logicParser.LogicParser(vd, logicParser.LogicODVariant.useWhiteOD, logicParser.LogicCurveVariants.useInterp1d)\n od1d = obj1.preparePixValue(reds1d)\n print(od1d)\n import matplotlib.pyplot as plt\n #im2 = plt.imread(fl)\n plt.imshow(od1d.reshape(reds.shape), cmap=plt.cm.jet)\n plt.show()\n\n def test_build2d_zDose(self):\n #fl = r'V:\\!Установки\\EBT3\\Без верификация фантом головы\\P_bv2_150dpi127.tif'\n fl = r'V:\\!Установки\\EBT3\\Калибровка от 05.09.2021 Со-60\\Lot 05062003\\24 часа после облучения\\G_2p5_150dpi_Lot2003.tif'\n im = tifffile.imread(fl)\n reds = im[0:im.shape[0], 0:im.shape[1], 0]\n reds1d = reds.flatten()\n print(reds)\n\n # vl = dbProxy.getData4CalibrationCurveWithDoseHighLimit(self.collectionTifProvider, 'Co-60 (MRRC)',\n # '05062003', 24, 50.0)\n vd = dbProxy.getDict4ExactCurveWithDoseLimit(self.collectionTifProvider, 'Co-60 (MRRC)',\n '05062003', 24, 50.0)\n\n obj1 = logicParser.LogicParser(vd, logicParser.LogicODVariant.useWhiteOD, logicParser.LogicCurveVariants.useSplev)\n od1d = obj1.preparePixValue(reds1d)\n print(od1d)\n dose1d = obj1.evaluateOD(od1d)\n print(dose1d)\n import matplotlib.pyplot as plt\n #im2 = plt.imread(fl)\n plt.imshow(dose1d.reshape(reds.shape), cmap=plt.cm.jet)\n plt.show()\n\n def setUp(self) -> None:\n self.client = MongoClient('mongodb://10.1.30.32:27017/')\n self.db = self.client['EBT_films_dose']\n self.collectionTifProvider = self.db['tifProvider']\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cobaltCorsair/EBT_films_dose","sub_path":"unittests/logicTime.py","file_name":"logicTime.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"676176448","text":"from keras.models import Sequential, load_model\nfrom keras.layers import Conv1D, LSTM, Dense, Dropout, Flatten\nimport numpy as np\nimport os\nfrom keras.utils import np_utils, plot_model\nimport matplotlib.pyplot as plt\nimport csv\n\nsampleLength = 32\nprintToBash = True\n\ndef createCNNLSTM(cnn1, cnn2, lstm):\n\tmodel = Sequential()\n\tif(cnn1>0):\n\t\tmodel.add(Conv1D(cnn1, kernel_size=3, activation='relu', input_shape = (sampleLength, 8)))\n\t\tmodel.add(Dropout(rate=0.5))\n\tif(cnn2>0):\n\t\tmodel.add(Conv1D(cnn2, kernel_size=3, activation='relu'))\n\t\tmodel.add(Dropout(rate=0.5))\n\tif(lstm>0):\n\t\tmodel.add(LSTM(lstm, return_sequences=True))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(num_gestures, activation='softmax'))\n\treturn model\n\ndef shuffleData(in_x, in_y):\n\tnewTrainX = []\n\tnewTrainY = []\n\tdataLen = len(in_x)\n\tk = 
np.arange(dataLen)\n\tnp.random.shuffle(k)\n\tfor i in k:\n\t\tnewTrainX.append(in_x[i])\n\t\tnewTrainY.append(in_y[i])\n\tnewTrainX = np.stack(newTrainX, axis = 0)\n\tnewTrainY = np.stack(newTrainY, axis = 0)\n\treturn newTrainX, newTrainY\n\n#get training data directories by pre-determined gestures\ngesture_names = ['Rest', 'Fist', 'Hold_Left', 'Hold_Right', 'Flower', 'Finger_Spread','Metal','Thumbs_Up','Peace']\n\n#get number of outputs\nnum_gestures = len(gesture_names)\n\nmodel_nodes = ['32_length_Sample_32cnn', '32_length_sample_64cnn', '32_length_sample_128cnn','32_length_sample_32_32cnn', '32_length_sample_16lstm','32_length_sample_32lstm','32_length_sample_64lstm', '32_Length_Sample_32_32_16', '32_Length_Sample_64_32_16', '32_Length_Sample_64_64_32', '32_Length_Sample_128_64_32']\n\n# Empty variable to contain array of models\nmodels = []\n\n# Load all model\nfor model in model_nodes:\n\tmodels.append(load_model('Models/'+model+'.h5'))\n\n# Singular Classification for each model\ngestureX = []\nclassification = 0\nclassificationRange = 1000\n\nfalse_positive = []\nfor i in range(len(gesture_names)):\n\tfalse_positive.append([])\n\nfor gesture in gesture_names:\n\tfor r, d, f in os.walk('Gestures/'+gesture):\n\t\tgesture_x = []\n\t\tfor file in f:\n\t\t\tif '.csv' in file:\n\t\t\t\t# data = pd.read_csv('Gestures/'+gesture+'/'+file)\n\t\t\t\tdata = np.genfromtxt('Gestures/'+gesture+'/'+file, delimiter=',')\n\t\t\t\tfileX = []\n\t\t\t\tlength = 0\n\t\t\t\tfor x in data:\n\t\t\t\t\t# if(len(gesture_x)<classificationRange):\n\t\t\t\t\tif(length>=sampleLength):\n\t\t\t\t\t\tgesture_x.append(fileX)\n\t\t\t\t\t\tfileX = []\n\t\t\t\t\t\tlength=0\n\t\t\t\t\tfileX.append(np.asarray(x))\n\t\t\t\t\tlength = length + 1\n\t\t\t\t# Stack the list of numpy arrays\n\t\tk = np.arange(len(gesture_x))\n\t\tk = np.random.choice(k, 1000)\n\t\tnewX = []\n\t\tfor i in k:\n\t\t\tnewX.append(gesture_x[i])\n\t\tgestureX.append(newX)\n\t\tfor i in range(len(gesture_names)):\n\t\t\tif i != classification:\n\t\t\t\tkk = np.arange(len(gesture_x))\n\t\t\t\tkk = np.random.choice(kk, 200)\n\t\t\t\tfor j in kk:\n\t\t\t\t\tfalse_positive[i].append(gesture_x[j])\n\tclassification = classification + 1\n\nmodel_acc = []\n# Iterate through each model\ncsv_data = []\nfor model, model_profile in zip(models,model_nodes):\n\t# Get classification Predictions\n\tcsv_d = []\n\tif(printToBash):\n\t\tprint('==========Classification Data for model_profile: ' + model_profile +'==========')\n\tcsv_d.append(model_profile)\n\tacc_gestures = []\n\t# Iterate through each gesture\n\t# Also provide false positive data to test\n\tfor gesture, gestureName, fp in zip(gestureX, gesture_names, false_positive):\n\t\tcount = 0\n\t\tfor datas in gesture:\n\t\t\tdata = np.array(datas)\n\t\t\tdata = data.reshape(1, data.shape[0], data.shape[1])\n\t\t\tprediction = model.predict_classes(data)\n\t\t\tif(gesture_names[prediction[0]]==gestureName):\n\t\t\t\tcount=count+1\n\t\tacc = count/len(gesture) * 100\n\t\tif(printToBash):\n\t\t\tprint('Gesture (' + gestureName + '): ' + str(acc)+'%')\n\t\tcsv_d.append(acc)\n\t\tcount = 0\n\t\tfor datas in fp:\n\t\t\tdata = np.array(datas)\n\t\t\tdata = data.reshape(1, data.shape[0], data.shape[1])\n\t\t\tprediction = model.predict_classes(data)\n\t\t\tif (gesture_names[prediction[0]]==gestureName):\n\t\t\t\tcount=count+1\n\t\tfpCount = count/len(fp) * 100\n\t\tif(printToBash):\n\t\t\tprint('False Positive (' + gestureName + '): ' + 
str(fpCount)+'%')\n\t\tcsv_d.append(fpCount)\n\t\tacc_gestures.append(acc)\n\tmodel_acc.append(acc_gestures)\n\tif(len(csv_d)>0):\n\t\tcsv_data.append(csv_d)\n\nwith open('modelResults.csv', 'w') as csvFile:\n\twriter = csv.writer(csvFile)\n\twriter.writerows(csv_data)\n\ncsvFile.close()\n#write the model accuracy\n","repo_name":"Rocksus/EMG-DroneControl","sub_path":"testMultiModel.py","file_name":"testMultiModel.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17632333208","text":"import os\nimport torch.distributed\nfrom typing import Tuple\nimport datetime\nfrom typing import Optional\n\nhostlist = None\n\n\ndef has_extra_work(len: int, world_size: Optional[int], rank: Optional[int]) -> bool:\n if (world_size or 1) == 1:\n return False\n\n rem = len % world_size\n return rank < rem\n\n\ndef is_work_uneven(len: int, world_size: Optional[int]) -> bool:\n return world_size is not None and len % world_size != 0\n\n\ndef get_work_slice(len: int, world_size: Optional[int], rank: Optional[int]) -> Tuple[int, int]:\n if (world_size or 1) == 1:\n return 0, len\n\n assert rank is not None, \"If world_size > 1, rank must be specified\"\n rem = len % world_size\n\n real_batch_size = len // world_size\n batch_offset = real_batch_size * rank + min(rank, rem)\n real_batch_size += int(rank < rem)\n\n return batch_offset, real_batch_size\n\n\nclass SLURMEnv:\n def __init__(self) -> None:\n global hostlist\n\n self.rank = os.getenv(\"SLURM_PROCID\")\n self.world_size = os.getenv(\"SLURM_NPROCS\")\n self.hostnames = os.getenv('SLURM_JOB_NODELIST')\n self.gpu_ids = os.getenv('SLURM_STEP_GPUS')\n self.job_id = os.getenv('SLURM_JOB_ID')\n\n self.is_distributed = self.rank is not None and self.world_size is not None and self.hostnames is not None and\\\n self.gpu_ids is not None\n\n if self.is_distributed:\n if hostlist is None:\n import hostlist\n\n self.rank = int(self.rank)\n self.world_size = int(self.world_size)\n self.hostnames = hostlist.expand_hostlist(self.hostnames)\n self.gpu_ids = self.gpu_ids.split(\",\")\n\n self.port = 12345 + int(min(self.gpu_ids))\n\n self.is_distributed = self.world_size > 1\n\n if not self.is_distributed:\n self.world_size = 1\n self.rank = 0\n\n def init_env(self):\n if not self.is_distributed:\n return\n\n print(f\"Initializing distributed environment. 
World size: {self.world_size}, master: {self.hostnames[0]}, \"\n f\"my rank {self.rank}\")\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(self.gpu_ids)\n torch.distributed.init_process_group('nccl', rank=self.rank, world_size=self.world_size,\n init_method=f\"tcp://{self.hostnames[0]}:{self.port}\",\n timeout=datetime.timedelta(0, 6000))\n\n def is_master(self):\n return (not self.is_distributed) or (self.rank == 0)\n\n def has_extra_work(self, work_size: int) -> bool:\n return self.is_distributed and has_extra_work(work_size, self.world_size, self.rank)\n\n def is_work_uneven(self, work_size: int) -> bool:\n return self.is_distributed and is_work_uneven(work_size, self.world_size)\n","repo_name":"IDSIA/kohonen-vae","sub_path":"framework/helpers/distributed.py","file_name":"distributed.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"12921204398","text":"import re\r\nfrom assets.dictionaries_for_languages import *\r\n\r\n# translation and language selection function\r\nclass simtracan:\r\n def option_selection(choice):\r\n option_error = False\r\n if choice == \"SM\": # Simplified Mandarin Chinese\r\n lang_name = \"Simplified Mandarin Chinese\"\r\n return (\"You just selected:\", lang_name)\r\n elif choice == \"TM\": # Traditional Mandarin Chinese\r\n lang_name = \"Traditional Mandarin Chinese\"\r\n return(\"You just selected:\", lang_name)\r\n elif choice == \"MP\": # Mandarin Chinese Pinyin\r\n lang_name = \"Mandarin Chinese Pinyin\"\r\n return(\"You just selected:\", lang_name)\r\n elif choice == \"C\": # Cantonese Chinese\r\n lang_name = \"Cantonese Chinese\"\r\n return(\"You just selected:\", lang_name)\r\n elif choice == \"CP\": # Cantonese Chinese Pinyin\r\n lang_name = \"Cantonese Chinese Pinyin\"\r\n return(\"You just selected:\", lang_name)\r\n else:\r\n option_error = True\r\n return(\"Sorry, we couldn't understand the option you selected\")\r\n\r\n def get_translation(from_lang, to_lang, text):\r\n # language selection\r\n translation = True # assume translation is possible; set to False when an error is found\r\n # Possible User Errors\r\n if from_lang == to_lang:\r\n translation = False\r\n error_found = \"Error: You selected the same language twice\"\r\n else:\r\n pass\r\n #if from_lang or to_lang == \"SM\" or \"TM\" or \"MP\" or \"C\" or \"CP\":\r\n #pass\r\n #else:\r\n #translation = False\r\n #error_found = \"Error: There was an error while selecting the first language\"\r\n dictionary = None # stays None if no dictionary matches the language pair\r\n # Simplified Mandarin to ...\r\n if from_lang == 'SM' and to_lang == 'TM':\r\n dictionary = sm2tm_dict\r\n elif from_lang == 'SM' and to_lang == 'MP':\r\n dictionary = sm2mp_dict\r\n elif from_lang == 'SM' and to_lang == 'C':\r\n dictionary = sm2c_dict\r\n # Traditional Mandarin to ...\r\n if from_lang == 'TM' and to_lang == 'SM':\r\n dictionary = tm2sm_dict\r\n elif from_lang == 'TM' and to_lang == 'MP':\r\n dictionary = tm2mp_dict\r\n elif from_lang == 'TM' and to_lang == 'C':\r\n dictionary = tm2c_dict\r\n # Mandarin Pinyin to ...\r\n if from_lang == 'MP' and to_lang == 'SM':\r\n dictionary = mp2sm_dict\r\n elif from_lang == 'MP' and to_lang == 'TM':\r\n dictionary = mp2tm_dict\r\n elif from_lang == 'MP' and to_lang == 'C':\r\n dictionary = mp2c_dict\r\n # Cantonese to ...\r\n if from_lang == 'C' and to_lang == 'TM':\r\n dictionary = c2tm_dict\r\n elif from_lang == 'C' and to_lang == 'SM':\r\n dictionary = c2sm_dict\r\n elif from_lang == 'C' and to_lang == 'CP':\r\n dictionary = 
c2cp_dict\r\n # Cantonese Pinyin to ...\r\n if from_lang == 'CP' and to_lang == 'SM':\r\n dictionary = cp2sm_dict\r\n elif from_lang == 'CP' and to_lang == 'TM':\r\n dictionary = cp2tm_dict\r\n elif from_lang == 'CP' and to_lang == 'C':\r\n dictionary = cp2c_dict\r\n # unsupported pair (e.g. SM -> CP): no dictionary was assigned\r\n if translation == True and dictionary is None:\r\n translation = False\r\n error_found = \"Error: this language combination is not supported\"\r\n # translation\r\n if translation == True:\r\n chinese_characters = \"[\\u4e00-\\u9fff]+\" # chinese characters\r\n if (re.search(chinese_characters, text)):\r\n # verification for chinese characters\r\n for word, replace in dictionary.items():\r\n text = text.replace(word, replace)\r\n print(\"\\nTranslation done!:\")\r\n return(text)\r\n else:\r\n return(\"Translation error: Your text doesn't contain Chinese characters!\")\r\n else:\r\n return(error_found)","repo_name":"danielabarazarte/simtracan-translator","sub_path":"Versions/version0.1.2/assets/simtracan_functions.py","file_name":"simtracan_functions.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"41811086075","text":"# -*- coding: utf-8 -*-\n# @Author : Lison Song\n# @Time : 2022/5/4 10:07\n\n# import gizeh\n# import moviepy.editor as mpy\nfrom conf import DATA_INPUTS, DATA_OUTPUTS\nfrom os import path, makedirs\nfrom moviepy.editor import *\n\n'''\nOSError: no library called \"cairo-2\" was found\nno library called \"cairo\" was found\n\ncairo-2 fails to configure on Windows; test again later when there is time\n'''\n\n\n# def make_frame(t):\n# surface = gizeh.Surface(width=320, height=260) # width, height\n# radius = surface.width * (1 + (t * (2 - t)) ** 2) / 6 # radius changes over time\n# circle = gizeh.circle(radius, xy=(64, 64), fill=(1, 0, 0))\n# circle.draw(surface)\n# return surface.getnpimage() # return an 8-bit RGB array\n\n\ndef mixing_1(sourcelist):\n '''\n The two simplest ways to combine several videos: 1. concatenate them one after another; 2. overlay them, e.g. one large frame playing several videos at once.\n Concatenation is done with the concatenate_videoclips function.\n Note 1: resolutions must be made identical before merging, otherwise the result looks strange\n :param sourcelist: list of video files to merge\n :return:\n '''\n\n clips = []\n for vidfile in sourcelist:\n clip = VideoFileClip(vidfile).resize(width=1280, height=720)\n clips.append(clip)\n\n finalclip = concatenate_videoclips(clips)\n finalclip.write_videofile(path.join(DATA_OUTPUTS, \"mixing_1.mp4\"), fps=25, codec='libx264', bitrate='2000k',\n audio_codec=\"aac\", audio_bitrate='128k')\n\n\ndef mixing_2(source):\n '''\n Merge videos into a grid-layout effect.\n finalclip plays clip1, clip2, clip3 in order; the clips need not share the same duration or size, they are simply joined end to end. The transition=my_clip\n parameter can also be used to set a transition animation between clips. Overlaying is done with the clips_array function\n :param source:\n :return:\n '''\n clip1 = VideoFileClip(source).margin(10)\n clip2 = clip1.fx(vfx.mirror_x) # mirror along the x axis\n clip3 = clip1.fx(vfx.mirror_y) # mirror along the y axis\n clip4 = clip1.resize(0.6) # scale proportionally by 0.6\n final_clip = clips_array([\n [clip1, clip2],\n [clip3, clip4]\n ])\n final_clip.resize(width=1280, height=720).write_videofile(path.join(DATA_OUTPUTS, \"mixing_2.mp4\"), fps=25,\n codec='libx264', bitrate='2000k', audio_codec=\"aac\",\n audio_bitrate='128k')\n\n\nif __name__ == '__main__':\n GIF_DIR = path.join(DATA_OUTPUTS, 'gifs')\n makedirs(GIF_DIR, exist_ok=True)\n\n # clip = mpy.VideoClip(make_frame, duration=2)\n # clip.write_gif(path.join(GIF_DIR, 'circle.gif'), fps=15)\n\n ## Video merge test 1: simple concatenation\n vidfiles_list =[path.join(DATA_INPUTS, 'video4.mp4'), path.join(DATA_INPUTS, 'video5.mp4'), path.join(DATA_INPUTS, 'video6.mp4')]\n mixing_1(vidfiles_list)\n\n ## Video merge test 2: mirrored clips tiled into a simple grid\n # mixing_2(path.join(DATA_INPUTS, 
'video5.mp4'))\n","repo_name":"slc14211421/pythonstudy2022","sub_path":"moviepy-study/1_composite_video.py","file_name":"1_composite_video.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10838549908","text":"class Solution:\n def maxProduct(self, nums: List[int]) -> int:\n # simple solution - since there's negative numbers as well\n # we should store the most negative nums as well\n # the smallest number can become the lasrgest if multiplied \n # by a negative\n\n # this is also wy sliding window CANNOT be used here - \n # a negative number further down (outside the window) can\n # lead to a max subarray\n\n cur_min = cur_max = 1\n\n # ret is already assigned the first element\n # because we're not multiplying to it anyways\n ret = nums[0]\n\n for num in nums:\n vals = [num, cur_min*num, cur_max*num]\n cur_min, cur_max = min(vals), max(vals)\n ret = max(ret, cur_max)\n \n return ret\n\n\n\n\n \n","repo_name":"avitej-iyer/grind_111_sols","sub_path":"maximum_product_subarray.py","file_name":"maximum_product_subarray.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32625747518","text":"#given a BST find the maximum element in BST\nclass BSTNode:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\nroot = BSTNode(10)\nroot.left = BSTNode(7)\nroot.right = BSTNode(15)\nroot.left.left = BSTNode(3)\nroot.left.right = BSTNode(9)\nroot.right.left = BSTNode(12)\nroot.right.right = BSTNode(17)\n\n# 10\n# / \\\n# 7 15\n# / \\ / \\\n# 3 9 12 17\n#\n\ndef findMaximumRec(root):\n if not root:\n return\n else:\n current_node = root\n if current_node.right is not None:\n return findMaximumRec(current_node.right)\n\n else:\n return current_node.data\n\n\nprint(\"find Max. Recursively: \",findMaximumRec(root))\n\ndef findMaximumIter(root):\n if not root:\n return\n\n else:\n current_node = root\n while current_node.right is not None:\n current_node = current_node.right\n\n return current_node.data\nprint(\"find Max. 
Iteratively: \", findMaximumIter(root))\n","repo_name":"medeepeshyadav/Data-Structures-and-Algorithms","sub_path":"Trees/binary search trees/findMaximum.py","file_name":"findMaximum.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41105224788","text":"import math\nwhile True:\n try:\n ans = 0\n n,a = list(map(int,input().split()))\n for i in range(1,n+1):\n ans += i*pow(a,i)\n print(ans)\n except EOFError:\n break\n\n \n","repo_name":"Quezad4/UVA-Problems","sub_path":"10523.py","file_name":"10523.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34578060838","text":"from django.urls import path\nfrom . import views\n\n#app_name='project1'\n\nurlpatterns = [\n\n \n #path('', views.HomeView.as_view(), name=\"HomeView\"),\n path('', views.home, name=\"home\"),\n path('about/', views.about, name=\"about\"),\n path('product_list/', views.product_list, name=\"product_list\"),\n \n path('/', views.product_list, name=\"product_by_category\"),\n # path('add-to-cart//', add_to_cart, name='add_to_cart'),\n path('add_to_cart///', views.add_to_cart, name=\"add_to_cart\"),\n path('remove_from_cart///', views.remove_from_cart, name=\"remove_from_cart\"),\n path('remove_single_from_cart///', views.remove_single_from_cart, name=\"remove_single_from_cart\"),\n path('order_summary', views.OrderSummaryView.as_view(), name=\"order_summary\"),\n path('//', views.product_detail, name=\"product_detail\"),\n # path('product_detail//', views.product_detail.as_view(), name=\"product_detail\"),\n path('base/', views.base, name=\"base\"),\n \n \n \n \n]\n\n","repo_name":"dimosojunior/dimo","sub_path":"project1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15837045942","text":"n = int(input())\nwork = [list(map(int, input().split())) for _ in range(n)]\nd = [0] * (n+1)\nanswer = 0\n\nfor i in range(n-1, -1, -1):\n \n # if the job taken on day i finishes within the available days, go ahead\n if work[i][0] + i < n+1:\n # pay for day i + d[day this job finishes]; keep the running answer\n d[i] = max(work[i][1] + d[work[i][0] + i], answer)\n answer = d[i]\n else:\n d[i] = answer\n\nprint(answer)","repo_name":"mark1346/coding_test_study","sub_path":"sehun/8장_다이나믹프로그래밍/Part3/33_퇴사.py","file_name":"33_퇴사.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70287420072","text":"#\n# @lc app=leetcode id=989 lang=python3\n#\n# [989] Add to Array-Form of Integer\n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def addToArrayForm(self, num: List[int], k: int) -> List[int]:\n for i in reversed(range(len(num))):\n k, num[i] = divmod(num[i] + k, 10)\n\n while k > 0:\n num = [k % 10] + num\n k //= 10\n\n return num\n# @lc code=end\n\n","repo_name":"MegaBlackLabel/leetcode","sub_path":"989.add-to-array-form-of-integer.py","file_name":"989.add-to-array-form-of-integer.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27573088325","text":"\"\"\"Schema for common data types.\"\"\"\n\nimport pyrootutils\n\nROOT = pyrootutils.setup_root(\n search_from=__file__,\n indicator=[\".git\"],\n pythonpath=True,\n dotenv=True,\n)\n\nfrom bson import ObjectId\n\n\nclass PyObjectId(ObjectId):\n \"\"\"Pydantic ObjectId.\"\"\"\n @classmethod\n def __get_validators__(cls):\n yield cls.validate\n\n @classmethod\n def validate(cls, v):\n if not ObjectId.is_valid(v):\n raise ValueError(\"Invalid objectid\")\n return ObjectId(v)\n\n @classmethod\n def __modify_schema__(cls, field_schema):\n field_schema.update(type=\"string\")","repo_name":"ruhyadi/DRFace","sub_path":"src/schema/common_schema.py","file_name":"common_schema.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10944573655","text":"import sys\nsys.path.append('backend')\nimport logging\n\nfrom backend.controllers import marketplace\n\nlogger = logging.getLogger(__name__)\n\ndef sell_goods():\n logger.info('Selling all goods...')\n cargos = marketplace.list_cargo()\n logger.info(f'Current cargo: {cargos}')\n\n logger.info('Docking ship...')\n assert marketplace.docking_ship(), 'Ship is not docked'\n\n logger.info('Selling goods...')\n reciept = marketplace.sell_all_goods(cargos)\n logger.info(reciept)\n logger.info('Selling done.')\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(filename)s: %(message)s')\n sell_goods()\n","repo_name":"jeffreywu1996/spacetraders-client","sub_path":"sell_goods.py","file_name":"sell_goods.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25620317884","text":"import json\nimport unittest\n\nfrom src.ai.ai_commands import Json\nfrom src.ai.ai_data_and_info.ai_awards.ai_awards import AiAwards\nfrom src.ai.ai_data_and_info.ai_info import AiInfo\nfrom src.ai.ai_data_and_info.ai_logger import AiLogger\nfrom src.ai.ai_data_and_info.ai_logger_builder_director import AiLoggerBuilderDirector\nfrom src.ai.ai_data_and_info.ai_option import AiOption\nfrom src.ai.ai_data_and_info.game_info import GameInfo\nfrom src.ai.game_components.game_state import GameState\n\n\nclass TestLoggerCreator(unittest.TestCase):\n def setUp(self):\n self.logger: AiLogger = 
TestLoggerCreator.create_test_logger()\n\n @classmethod\n def create_test_logger(cls) -> AiLogger:\n director: AiLoggerBuilderDirector = AiLoggerBuilderDirector()\n ai_info = AiInfo(\"neuron-network\", \"scout-network\")\n game_id = 1\n player_id = 2\n ai_option = AiOption()\n ai_option.troopType = \"motorized\"\n ai_option.is_train = True\n game_info = GameInfo(game_id, player_id, ai_option)\n\n return director.create_ai_logger(ai_info, game_info)\n\n\nclass AiLoggerConstructor(TestLoggerCreator):\n def test_have_file_with_game_record(self):\n self.assertEqual(self.logger.get_game_log_file_path() != \"\", True)\n\n def test_have_file_with_end_game_state(self):\n self.assertEqual(self.logger.get_end_game_state_file_path() != \"\", True)\n\n\nclass AiLoggerSaveToFileWithGameRecord(TestLoggerCreator):\n def setUp(self):\n super().setUp()\n self.awards: AiAwards = AiAwards()\n self.game_state: GameState = GameState()\n self.game_state.current_time.set_string_presentation(\"0\")\n self.logger.save_to_game_record_file(self.awards, self.game_state)\n\n self.game_state2: GameState = GameState()\n self.game_state2.current_time.set_string_presentation(\"1\")\n self.logger.save_end_game_state(self.awards, self.game_state2)\n\n\nclass CanSaveToFileWithGameRecord(AiLoggerSaveToFileWithGameRecord):\n def setUp(self):\n super().setUp()\n game_log = open(self.logger.get_game_log_file_path(), 'r')\n print(type(game_log)) # TODO 7kia: remove later\n self.game_log_content: Json = json.load(game_log)\n\n def test_game_state(self):\n self.assertEqual(self.game_log_content[\"data\"][0][\"game_state\"], self.game_state.as_json())\n self.assertEqual(self.game_log_content[\"data\"][1][\"game_state\"], self.game_state2.as_json())\n\n def test_awards_for_the_state(self):\n self.assertEqual(self.game_log_content[\"data\"][0][\"awards\"], self.awards.as_json())\n self.assertEqual(self.game_log_content[\"data\"][1][\"awards\"], self.awards.as_json())\n\n\nclass AiLoggerSaveToFileWithEndGameState(TestLoggerCreator):\n def setUp(self):\n super().setUp()\n self.awards: AiAwards = AiAwards()\n self.awards.troop_amount = 1\n self.awards.experience = 2\n self.awards.organization = 3\n self.awards.overlap = 4\n\n self.game_state: GameState = GameState()\n self.game_duration: int = 4\n self.game_state.current_time.set_string_presentation(str(self.game_duration))\n\n for i in range(self.game_duration - 1):\n game_state: GameState = GameState()\n game_state.current_time.set_string_presentation(str(i))\n self.logger.save_to_game_record_file(self.awards, game_state)\n\n self.logger.save_end_game_state(self.awards, self.game_state)\n\n\nclass CanSaveToFileWithEndGameState(AiLoggerSaveToFileWithEndGameState):\n def setUp(self):\n super().setUp()\n game_log = open(self.logger.get_end_game_state_file_path(), 'r')\n print(type(game_log)) # TODO 7kia: remove later\n self.game_log_content: Json = json.load(game_log)\n\n self.check_state: int = self.game_duration\n\n def test_game_state(self):\n self.assertEqual(self.game_log_content[\"game_state\"],\n self.game_state.as_json())\n\n def test_awards_for_the_state(self):\n self.assertEqual(self.game_log_content[\"awards\"],\n self.awards.as_json())\n\n def test_game_duration(self):\n self.assertEqual(self.game_log_content[\"game_duration\"],\n str(self.game_duration))\n\n def test_awards_sum_for_whole_game(self):\n self.assertEqual(self.game_log_content[\"awards_sum\"], {\n \"troop_amount\": 1 * self.game_duration,\n \"experience\": 2 * self.game_duration,\n \"organization\": 3 * 
self.game_duration,\n \"overlap\": 4 * self.game_duration,\n })\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"7kia/AIServer","sub_path":"tests/ai/neural_network_logger.py","file_name":"neural_network_logger.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15114740780","text":"\"\"\"Add device model to manage api-key by group\n\nRevision ID: ead421cc5deb\nRevises: 23659f27ce8a\nCreate Date: 2022-05-18 16:34:06.326494\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ead421cc5deb'\ndown_revision = '23659f27ce8a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('device',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('device_name', sa.String(length=80), nullable=True),\n sa.Column('device_key', sa.String(length=80), server_default='8536f2e37df1459493258c513477adc2', nullable=True),\n sa.Column('group_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['group_id'], ['group.id'], name='fk_device_group_id', ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('device')\n # ### end Alembic commands ###\n","repo_name":"os-climate/sostrades-webapi","sub_path":"migrations/versions/ead421cc5deb_add_device_model_to_manage_api_key_by_.py","file_name":"ead421cc5deb_add_device_model_to_manage_api_key_by_.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"164328539","text":"import json as jsn\nfrom time import sleep\n\nfrom tests_common.pytest_bdd import given, when, then\nfrom mail.pypg.pypg.common import transaction, qexec\nfrom mail.pypg.pypg.query_conf import load_from_my_file\n\nfrom .mdb_actions import step_some_partitions, step_drop_partitions, step_check_partitions\n\nQUERIES = load_from_my_file(__file__)\n\n\ndef get_dsn(context, db_user=None):\n dsn = context.config['huskydb']\n if db_user:\n dsn = dsn + ' user=' + db_user\n return dsn\n\n\ndef has_shiva_tasks(context):\n with transaction(get_dsn(context)) as huskydb_conn:\n cur = qexec(\n huskydb_conn,\n QUERIES.get_shiva_tasks\n )\n return cur.fetchone() is not None\n\n\n@then(u'all shiva tasks finished')\ndef step_then_shiva_tasks_finished(context):\n has_tasks = True\n for t in (0.1, 0.3, 1, 3, 5, 5, 5, 5, 5, 5):\n sleep(t)\n has_tasks = has_shiva_tasks(context)\n if not has_tasks:\n break\n assert not has_tasks, 'Expected: there are no active shiva tasks, but there are some'\n\n\ndef has_transfer_tasks(context):\n with transaction(get_dsn(context)) as huskydb_conn:\n cur = qexec(\n huskydb_conn,\n QUERIES.get_shiva_tasks\n )\n return cur.fetchone() is not None\n\n\n@then(u'there are {has_some:NoOrSome} transfer tasks for \"{user_name:w}\"')\ndef step_then_has_transfer_tasks(context, has_some, user_name):\n with transaction(get_dsn(context)) as huskydb_conn:\n cur = qexec(\n huskydb_conn,\n QUERIES.get_transfer_tasks,\n uid=context.get_user(user_name).uid\n )\n has_tasks = cur.fetchone() is not None\n if has_some:\n assert has_tasks, (\n 'Expect that there are some active transfer tasks for user, but nothing found'\n )\n else:\n assert not has_tasks, (\n 'Expect that there are no active transfer 
tasks for user, but there are some'\n )\n\n\n@then(u'there are some transfer tasks for \"{user_name:w}\" with task args containing {task_args}')\ndef step_then_has_transfer_tasks_(context, user_name, task_args):\n with transaction(get_dsn(context)) as huskydb_conn:\n cur = qexec(\n huskydb_conn,\n QUERIES.get_transfer_tasks,\n uid=context.get_user(user_name).uid\n )\n res = cur.fetchone()\n assert res is not None and set(jsn.loads(task_args).items()).issubset(set(res[0].items())), (\n 'Expect that there are some active transfer tasks for user with given task_args, but nothing found'\n )\n\n\ndef clear_shiva_shard_tasks(context):\n with transaction(get_dsn(context)) as huskydb_conn:\n qexec(\n huskydb_conn,\n QUERIES.clear_shiva_shard_tasks,\n )\n\n\n@given('there are no shiva tasks')\ndef step_clear_shiva_tasks(context):\n clear_shiva_shard_tasks(context)\n\n\ndef clear_shiva_shards(context):\n with transaction(get_dsn(context)) as huskydb_conn:\n qexec(\n huskydb_conn,\n QUERIES.clear_shiva_shards,\n )\n\n\ndef add_shiva_shard(\n context,\n shard_id,\n cluster_id,\n disk_size,\n used_size=0,\n shard_type='general',\n load_type='general',\n can_transfer_to=False,\n shard_name='shard',\n migration=0,\n priority=0):\n with transaction(get_dsn(context)) as huskydb_conn:\n qexec(\n huskydb_conn,\n QUERIES.add_shiva_shard,\n shard_id=shard_id,\n shard_name=shard_name,\n cluster_id=cluster_id,\n disk_size=disk_size,\n used_size=used_size,\n shard_type=shard_type,\n load_type=load_type,\n can_transfer_to=can_transfer_to,\n migration=migration,\n priority=priority,\n )\n\n\n@given('all shards have load_type \"{load_type}\" and are open for registration')\ndef step_set_load_typefor_all_shards(context, load_type):\n add_shiva_shard(\n context=context,\n shard_id=context.config['shard_id'],\n cluster_id='cluster_id1',\n disk_size=100000000000,\n load_type=load_type,\n can_transfer_to=True,\n )\n add_shiva_shard(\n context=context,\n shard_id=context.config['shard_id2'],\n cluster_id='cluster_id2',\n disk_size=100000000000,\n load_type=load_type,\n can_transfer_to=True,\n )\n\n\n@when('we make new shiva shards')\ndef step_make_shiva_shards(context):\n context.shiva_shards = jsn.loads(context.text)\n for shard in context.shiva_shards.values():\n add_shiva_shard(context, **shard)\n\n\n@given('current shard is shiva shard with disk size \"{disk_size:d}\"')\ndef step_set_current_shiva_shard_with_disk_size(context, disk_size):\n add_shiva_shard(\n context=context,\n shard_id=context.config['shard_id'],\n cluster_id='cluster_id',\n disk_size=disk_size,\n can_transfer_to=True,\n )\n\n\n@given('there are {have_some:NoOrSome} open for transfer shiva shards with \"{load_type}\" load_type')\ndef step_make_open_shard(context, have_some, load_type):\n if have_some:\n add_shiva_shard(\n context=context,\n shard_id=666,\n cluster_id='cluster_id_666',\n disk_size=11111111111,\n can_transfer_to=True,\n load_type=load_type,\n )\n\n\ndef read_shards(rows):\n keys = 'shard_id', 'cluster_id', 'disk_size', 'used_size', 'shard_type', 'load_type', 'can_transfer_to', 'shard_name', 'migration', 'priority'\n return {str(row[0]): dict(zip(keys, row)) for row in rows}\n\n\ndef get_shiva_shard(context, shard_id):\n with transaction(get_dsn(context)) as huskydb_conn:\n cur = qexec(\n huskydb_conn,\n QUERIES.get_shiva_shard,\n shard_id=shard_id,\n )\n return read_shards(cur.fetchall())\n\n\n@then('current shard has updated used_size')\ndef step_shard_has_updated_used_size(context):\n shard_id = str(context.config['shard_id'])\n 
shiva_shards = get_shiva_shard(context, shard_id)\n assert shiva_shards[shard_id]['used_size'] > 0, 'Expected: shiva shard has used_size > 0'\n\n\n@then('transfer for current shard closed')\ndef step_shard_transfer_closed(context):\n shard_id = str(context.config['shard_id'])\n shiva_shards = get_shiva_shard(context, shard_id)\n assert not shiva_shards[shard_id]['can_transfer_to'], \\\n 'Expected: transfer for shard is close, but it was opened'\n\n\n@then('transfer for current shard opened')\ndef step_shard_transfer_opened(context):\n shard_id = str(context.config['shard_id'])\n shiva_shards = get_shiva_shard(context, shard_id)\n assert shiva_shards[shard_id]['can_transfer_to'], \\\n 'Expected: transfer for shard is open, but it was closed'\n\n\n@then('there is \"{shard_id:d}\" shard in DB')\ndef step_get_shiva_shard(context, shard_id):\n context.shiva_shards = get_shiva_shard(context, shard_id)\n assert context.shiva_shards, \\\n 'Missing shard with shard_id=\"{}\" in DB'.format(shard_id)\n\n\ndef get_all_shiva_shards(context):\n with transaction(get_dsn(context)) as huskydb_conn:\n cur = qexec(\n huskydb_conn,\n QUERIES.get_all_shiva_shards,\n )\n return read_shards(cur.fetchall())\n\n\n@then('there are no shards in DB')\ndef step_get_all_shiva_shards(context):\n context.shiva_shards = get_all_shiva_shards(context)\n assert not context.shiva_shards, \\\n 'Expected: there are no shards in DB, but was: \"{}\"'.format(context.shiva_shards)\n\n\n@given('there are some partitions for \"{table}\" in huskydb')\ndef step_some_partitions_in_huskydb(context, table):\n with transaction(get_dsn(context)) as huskydb_conn:\n step_some_partitions(context, huskydb_conn, table)\n\n\n@when('we drop \"{count:d}\" partitions for \"{table}\" in huskydb')\ndef step_drop_partitions_in_huskydb(context, table, count):\n with transaction(get_dsn(context)) as huskydb_conn:\n step_drop_partitions(huskydb_conn, table, count)\n\n\n@then('there are the same partitions for \"{table}\" in huskydb')\ndef step_check_partitions_in_huskydb(context, table):\n with transaction(get_dsn(context)) as huskydb_conn:\n step_check_partitions(context, huskydb_conn, table)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/lib/steps/shivadb.py","file_name":"shivadb.py","file_ext":"py","file_size_in_byte":8271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32633287586","text":"import sys\nimport numpy as np\nfrom PIL import Image, ImageTk\nfrom natsort import natsorted\n# from matplotlib import pyplot as plt\nimport math\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom tkinter import scrolledtext\nimport os\nimport pathlib \nimport time\nimport hashlib\nimport shutil\n\n# 自作モジュール\nimport deck_modules as dmod\nimport my_class_file as mcf\nGEO_MAIN_H = 700\nGEO_MAIN_W = 1000\nGEO_MAIN_X = 50\nGEO_MAIN_Y = 50\nTHUMBNAIL_H = 115\nTHUMBNAIL_W = 100\n\n\nINIT_SHEET_DIR = \"sheet\"\nINIT_OUTPUT_NAME = \"out\"\nTMP_WORK_DIR = \"tmp_work\"\nINIT_FTYPE = \"jpg\"\nDT_OMOTE = \"表面\"\nDT_URA = \"裏面\"\nDT_MAISUU = \"枚数\"\n\n#カードリストの項目幅\nC0WIDTH=3 #card ID\nC1WIDTH=3 #チェックボックス\nC2WIDTH=8 #face\nC3WIDTH=8 #back\nC4WIDTH=5 #num\n\n########拾い物##################\nGEO_SATE = '+50+50'\ndef get_size(tup, maxsize = 100):\n \"\"\" It returns the size of images on the summary\"\"\"\n x, y = tup\n if (x<=maxsize and y<=maxsize):\n return (x, y)\n elif x > y:\n r = float(maxsize) / float(x)\n return 
(maxsize, int(y*r))\n else:\n r = float(maxsize) / float(y)\n return (int(x*r), maxsize)\n\nclass ImageLabel:\n \"\"\" A Label class to show an image \"\"\"\n\n id_original_size = None # ID of the Label showing an original size image\n image_file_now = None\n \n def __init__(self, stxt, image_file, img):\n self.image= img\n self.image_file = image_file\n frame = tk.Frame(stxt, height=THUMBNAIL_H, width=THUMBNAIL_W)\n frame.pack_propagate(0)\n txt_label=tk.Label(frame, text=os.path.basename(self.image_file), font=('Helvetica', '8'))\n txt_label.pack(side=tk.BOTTOM)\n \n self.img_label=tk.Label(frame, image=self.image)\n self.img_label.pack(side=tk.BOTTOM)\n\n stxt.window_create(tk.END, align=tk.BASELINE, padx=5, pady=5, window=frame)\n self.img_label.bind('', self.show)\n\n def show(self, event):\n label = ImageLabel.id_original_size\n if (label and label.winfo_exists()):\n top = label.winfo_toplevel()\n top.destroy()\n top = tk.Toplevel(self.img_label)\n top.title(os.path.basename(self.image_file))\n top.geometry(GEO_SATE)\n img = Image.open(self.image_file)\n self.timg = ImageTk.PhotoImage(img.resize(get_size(img.size,maxsize = 600), Image.NEAREST))\n label=tk.Label(top, image=self.timg)\n label.pack()\n ImageLabel.id_original_size = label\n ImageLabel.image_file_now = self.image_file\n#######################\n\n# アプリケーション(GUI)クラス\nclass Application(tk.Frame):\n DEBUG_LOG = True\n def __init__(self, master=None):\n super().__init__(master)\n self.master.geometry(f\"{GEO_MAIN_W}x{GEO_MAIN_H}+{GEO_MAIN_X}+{GEO_MAIN_Y}\") # ウィンドウの幅と高さピクセル単位で指定(width x height)\n # self.pack()\n # self.file = []\n self.disp_img_once = False\n self.list_once = False\n self.cardlist_once = False\n self.preview_once = False\n self.sheet_list = [] \n self.img_dict ={} # CardImgオブジェクトのdict \n self.card_list =[] # Cardオブジェクトのlist\n self.dcard_list = [] # dCardオブジェクトのlist\n self.outcard_list =[] # Cardオブジェクトのlist\n self.sheet_dir = pathlib.Path().resolve() / INIT_SHEET_DIR\n self.tmp_fils_path = pathlib.Path().resolve() / TMP_WORK_DIR\n self.output_dir = pathlib.Path().resolve()\n self.ftype = INIT_FTYPE\n self.cut_out_cards = dmod.CutOutCards(rownum=1, columnum=1, dir_out_path = self.tmp_fils_path, ftype=INIT_FTYPE)\n self.copy_output = dmod.CreateZip()\n self.create_widgets()\n self.create_frame_select_dir()\n os.makedirs(self.tmp_fils_path, exist_ok=True)\n\n def create_widgets(self):\n # レイアウト\n self.frame_left = tk.Frame(self.master, width=GEO_MAIN_W//3, height=GEO_MAIN_H, bg=\"#f5f5dc\")\n self.frame_center = tk.Frame(self.master, width=GEO_MAIN_W//3, height=GEO_MAIN_H, bg=\"#7fffd4\")\n self.frame_right = tk.Frame(self.master, width=GEO_MAIN_W//3, height=GEO_MAIN_H, bg=\"#6495ed\")\n self.frame_left.pack(side = tk.LEFT)\n self.frame_center.pack(side = tk.LEFT) \n self.frame_right.pack(side = tk.LEFT) \n\n self.frame_select_dir = tk.Frame(self.frame_left, width=GEO_MAIN_W//3, height=GEO_MAIN_H*(2/10),relief=\"sunken\",borderwidth = 1, bg=self.frame_left.cget(\"bg\"))\n self.frame_disp_img = tk.Frame(self.frame_left, width=GEO_MAIN_W//3, height=GEO_MAIN_H*(8/10), bg=self.frame_left.cget(\"bg\"))\n self.frame_card_button = tk.Frame(self.frame_center, width=GEO_MAIN_W//3, height=GEO_MAIN_H*(2/10), bg=self.frame_center.cget(\"bg\"))\n self.frame_card_list = tk.Frame(self.frame_center, width=GEO_MAIN_W//3, height=GEO_MAIN_H*(8/10), bg=self.frame_center.cget(\"bg\"))\n \n self.frame_select_dir.pack(anchor=tk.W) \n self.frame_select_dir.pack_propagate(False) \n self.frame_select_dir.grid_propagate(False)\n 
self.frame_select_dir.update_idletasks()\n \n self.frame_disp_img.pack(anchor=tk.W)\n self.frame_disp_img.pack_propagate(False)\n self.frame_disp_img.grid_propagate(False)\n self.frame_disp_img.update_idletasks()\n\n self.frame_card_button.pack(anchor=tk.W)\n self.frame_card_button.pack_propagate(False)\n self.frame_card_button.grid_propagate(False)\n self.frame_card_button.update_idletasks() \n\n self.frame_card_list.pack(anchor=tk.W)\n self.frame_card_list.pack_propagate(False)\n self.frame_card_list.grid_propagate(False)\n self.frame_card_list.update_idletasks() \n\n self.frame_right.pack_propagate(False)\n self.frame_right.grid_propagate(False)\n self.frame_right.update_idletasks() \n \n\n def create_frame_select_dir(self):\n # シートディレクトリ選択\n self.dir_tbox = tk.Entry(self.frame_select_dir, width = 50)\n self.dir_tbox.insert(tk.END,self.sheet_dir) \n self.dir_tbox.grid(row=0, column=0, sticky=tk.W, pady=2)\n \n # 2行目表示\n row2 = tk.Frame(self.frame_select_dir, bg=self.frame_select_dir.cget(\"bg\"))\n row2.grid(row=1,column = 0,sticky=tk.W,pady = 5)\n \n # 拡張子選択フォーム \n self.ftype_list = ttk.Combobox(row2, state='readonly', width = 4)\n self.ftype_list[\"values\"] = (\"jpg\", \"png\", \"bmp\")\n self.ftype_list.current(0)\n self.ftype_list.pack(side = tk.LEFT, anchor = tk.W)\n\n # シート選択ボタン\n self.select_dir_button = tk.Button(row2, text='シート選択', command=self.select_dir_button_func)\n self.select_dir_button.pack(side = tk.LEFT, padx=5) \n\n \n # 3行目表示\n row3 = tk.Frame(self.frame_select_dir, bg=self.frame_select_dir.cget(\"bg\"))\n row3.grid(row=2,column = 0,sticky=tk.W)\n # 行入力フォーム\n row_tex = tk.Label(row3,text = \"行:\", bg=self.frame_select_dir.cget(\"bg\"))\n row_tex.pack(side = tk.LEFT) \n self.row_tbox = tk.Entry(row3, width = 3)\n self.row_tbox.insert(tk.END, 3) \n self.row_tbox.pack(side = tk.LEFT)\n # 列入力フォーム \n colum_tex = tk.Label(row3,text = \"列:\", bg=self.frame_select_dir.cget(\"bg\"))\n colum_tex.pack(side = tk.LEFT) \n self.colum_tbox = tk.Entry(row3, width = 3)\n self.colum_tbox.insert(tk.END, 6) \n self.colum_tbox.pack(side = tk.LEFT)\n # 削り幅割合フォーム\n colum_tex = tk.Label(row3,text = \"削り幅(%):[短辺準拠]\", bg=self.frame_select_dir.cget(\"bg\"))\n colum_tex.pack(side = tk.LEFT) \n self.rate_tbox = tk.Entry(row3, width = 3)\n self.rate_tbox.insert(tk.END, 3) \n self.rate_tbox.pack(side = tk.LEFT)\n # 「シートをトリミング」ボタン\n self.run_trim_button = tk.Button(row3, text='シートをトリミング', command=self.run_trim_button_func)\n self.run_trim_button.pack(side = tk.LEFT, anchor=tk.E, padx = 12)\n\n row4 = tk.Frame(self.frame_select_dir, bg=self.frame_select_dir.cget(\"bg\"))\n row4.grid(row=3,column = 0,sticky=tk.W)\n txt_label1 = tk.Label(row4, text = '【トリミング不要のカードを読み込む場合】', bg=row3.cget(\"bg\"))\n txt_label1.pack(anchor = tk.W)\n txt_label2 = tk.Label(row4, font=(\"\", 8, \"\"), text = ' 1)「シート選択」でカードのあるフォルダを選択。\\n 2) 行1、列 1、削り幅を任意にしてトリミング。', bg=row3.cget(\"bg\"),justify=\"left\")\n txt_label2.pack(side = tk.LEFT, anchor = tk.W)\n # 「カード一覧表示」ボタン\n self.run_cardlist_button = tk.Button(row4, text='カード一覧表示', command=self.disp_card_list_button_func)\n self.run_cardlist_button.pack(side = tk.LEFT, anchor=tk.E, padx = 5)\n self.run_cardlist_button.config(state = tk.DISABLED)\n\n self.make_sheet_list()\n self.disp_img(self.sheet_list)\n\n def create_frame_card_operation(self):\n frow1 = tk.Frame(self.frame_card_button, bg = self.frame_card_button.cget(\"bg\"))\n frow1.pack(anchor=tk.W)\n # #リストの選択解除ボタン\n all_select_button = tk.Button(frow1,text='全て選択',command=self.all_select_button_func)\n 
all_select_button.grid(row=0,column=0)\n all_clear_button = tk.Button(frow1, text='選択解除',command=self.all_clear_button_func)\n all_clear_button.grid(row=0, column=1)\n \n add_card_button = tk.Button(frow1, text='カード追加',command=self.add_card_buttom_func)\n add_card_button.grid(row=0, column=2)\n\n frow2 = tk.Frame(self.frame_card_button, bg = self.frame_card_button.cget(\"bg\"))\n frow2.pack(anchor=tk.E)\n label1 = tk.Label(frow2, text=\"選択したカードの\", bg = frow2.cget(\"bg\"))\n label1.pack(side = tk.LEFT)\n self.card_cbox = tk.ttk.Combobox(frow2,width = 6, state = \"readonly\")\n self.card_cbox[\"values\"] = (DT_OMOTE,DT_URA,DT_MAISUU)\n self.card_cbox.current(1)\n self.card_cbox.pack(side = tk.LEFT)\n label2 = tk.Label(frow2, text=\"を\", bg = frow2.cget(\"bg\"))\n label2.pack(side = tk.LEFT)\n self.card_entry = tk.Entry(frow2,width=8)\n self.card_entry.insert(tk.END, f'000.{INIT_FTYPE}') \n self.card_entry.pack(side = tk.LEFT)\n label3 = tk.Label(frow2, text=\"に\", bg = frow2.cget(\"bg\"))\n label3.pack(side = tk.LEFT)\n run_card_change_button = tk.Button(frow2, text='変更する',command=self.run_card_change_button_func)\n run_card_change_button.pack(side = tk.LEFT)\n frow3 = tk.Frame(self.frame_card_button, bg = self.frame_card_button.cget(\"bg\"))\n frow3.pack(anchor=tk.E)\n preview_button = tk.Button(frow3, text='選択したカードのプレビュー',command=self.preview_button_func)\n preview_button.pack()\n\n def create_frame_output_buttoms(self):\n frow1 = tk.Frame(self.frame_right, width = self.frame_right.cget(\"width\"), height=GEO_MAIN_H*(1/10), bg = self.frame_right.cget(\"bg\"))\n\n # 出力先ディレクトリ選択\n self.outdir_tbox = tk.Entry(frow1, width = 50)\n self.outdir_tbox.insert(tk.END, self.output_dir) \n self.outdir_tbox.pack(side = tk.LEFT)\n\n frow2 = tk.Frame(self.frame_right, width = self.frame_right.cget(\"width\"), height=GEO_MAIN_H*(1/10),bg = self.frame_right.cget(\"bg\"))\n\n # シート選択ボタン\n select_dir_button = tk.Button(frow2, text='出力先フォルダの選択', command=self.select_outdir_button_func)\n select_dir_button.pack(side = tk.LEFT) \n\n frow3 = tk.Frame(self.frame_right, width = self.frame_right.cget(\"width\"), height=GEO_MAIN_H*(1/10),bg = self.frame_right.cget(\"bg\"))\n # 出力ボタン\n l1 = tk.Label(frow3, text=\" 出力ファイル名:\", bg = frow3.cget(\"bg\"))\n l1.pack(side = tk.LEFT)\n self.output_name_entry = tk.Entry(frow3, width = 8)\n self.output_name_entry.insert(tk.END, INIT_OUTPUT_NAME)\n self.output_name_entry.pack(side = tk.LEFT)\n l2 = tk.Label(frow3, text=\".zip\", bg = frow3.cget(\"bg\"))\n l2.pack(side = tk.LEFT)\n deck_output_buttom = tk.Button(frow3, text='デッキを出力', command = self.deck_output_buttom_fnc)\n deck_output_buttom.pack(padx = 6, side = tk.LEFT)\n\n frow3.pack(fill=tk.BOTH, side = tk.BOTTOM,anchor=tk.N)\n frow2.pack(fill=tk.BOTH, side = tk.BOTTOM,anchor=tk.N) \n frow1.pack(fill=tk.BOTH, side = tk.BOTTOM,anchor=tk.N)\n\n # def create_frame_disp_card(self):\n # frame = tk.Frame(self.frame_right, width = self.frame_right.cget(\"width\"), height=GEO_MAIN_H*(9/10), bg = self.frame_right.cget(\"bg\"))\n\n def all_select_button_func(self):\n for dcard in self.dcard_list:\n dcard.cb.set(True)\n\n def all_clear_button_func(self):\n for dcard in self.dcard_list:\n dcard.cb.set(False)\n\n def disp_unimplemented(self):\n messagebox.showinfo(\"未実装\", \"この機能はまだないよ\")\n\n def run_card_change_button_func(self):\n tar = self.card_cbox.get()\n txt = self.card_entry.get()\n if tar == DT_MAISUU and (txt.isdecimal() is False):\n messagebox.showinfo(\"エラー\", \"数字を入れて\")\n return\n \n if tar != DT_MAISUU and not(INIT_FTYPE 
in txt):\n messagebox.showinfo(\"エラー\", \"拡張子不明\")\n return\n \n for dcard in self.dcard_list:\n c = dcard.cb.get()\n if c is True:\n if tar == DT_OMOTE:\n dcard.face.delete(0, tk.END)\n dcard.face.insert(tk.END, txt)\n elif tar == DT_URA:\n dcard.back.delete(0, tk.END)\n dcard.back.insert(tk.END, txt)\n elif tar == DT_MAISUU:\n dcard.num.delete(0, tk.END)\n dcard.num.insert(tk.END, txt)\n else:\n messagebox.showinfo(\"エラー\", \"予期せぬエラー\")\n\n\n def select_dir_button_func(self):\n # フォルダ選択ダイアログ\n tmp_dir = filedialog.askdirectory(initialdir = os.path.dirname(__file__), title =\"シートがあるフォルダを選択\")\n if len(tmp_dir)!=0:\n self.sheet_dir = pathlib.Path(tmp_dir)\n self.dir_tbox.delete(0, tk.END) \n self.dir_tbox.insert(tk.END, str(self.sheet_dir))\n self.make_sheet_list()\n self.disp_img(self.sheet_list)\n \n \n def select_outdir_button_func(self):\n # フォルダ選択ダイアログ\n tmp_dir = filedialog.askdirectory(initialdir = os.path.dirname(__file__), title =\"出力フォルダを選択\")\n if len(tmp_dir)!=0:\n self.output_dir = pathlib.Path(tmp_dir)\n self.outdir_tbox.delete(0, tk.END) \n self.outdir_tbox.insert(tk.END, str(self.output_dir))\n \n def run_trim_button_func(self):\n self.remove_cards()\n self.ftype = self.ftype_list.get()\n self.cut_out_cards.rownum = int(self.row_tbox.get())\n self.cut_out_cards.columnum = int(self.colum_tbox.get())\n self.cut_out_cards.cut_offset_rate = float(self.rate_tbox.get())\n self.cut_out_cards.cut_out(self.sheet_list)\n time.sleep(0.1)\n self.make_cardimg_dict()\n self.disp_img(self.img_dict)\n # messagebox.showinfo(\"メッセージ\", \"トリミング完了\")\n self.run_cardlist_button.config(state = tk.ACTIVE) \n\n def disp_card_list_button_func(self):\n self.make_card_list()\n self.disp_card_list()\n if self.cardlist_once is False:\n self.create_frame_card_operation()\n self.cardlist_once = True\n \n def deck_output_buttom_fnc(self):\n oname = self.output_name_entry.get()\n if len(str(self.output_dir)) == 0:\n messagebox.showinfo(\"エラー\", \"フォルダが指定されていません。\")\n return\n if len(str(oname)) == 0:\n messagebox.showinfo(\"エラー\", \"ファイル拡張子が指定されていません。\")\n return \n opath = self.output_dir / oname\n wpath = self.tmp_fils_path / oname\n\n os.makedirs(wpath, exist_ok=True)\n self.copy_output.create_zip(card_list = self.outcard_list, out_path=opath, work_path = wpath)\n messagebox.showinfo(\"メッセージ\", \"デッキ作成完了\")\n \n def add_card_buttom_func(self):\n self.add_new_card_dcard()\n\n def preview_button_func(self):\n self.make_outcard_list()\n if self.preview_once is False:\n self.create_frame_output_buttoms()\n self.disp_outcard_preview()\n self.preview_once = True\n \n\n def make_cardimg_dict(self):\n self.img_dict = {}\n if len(str(self.tmp_fils_path)) == 0:\n messagebox.showinfo(\"エラー\", \"フォルダが指定されていません。\")\n return\n if len(str(self.ftype)) == 0:\n messagebox.showinfo(\"エラー\", \"ファイル拡張子が指定されていません。\")\n return \n files = self.tmp_fils_path.glob(f'**/*.{INIT_FTYPE}')\n files = natsorted(files)\n\n for file in files:\n with open(file, 'rb') as image:\n target = image.read()\n target_sha256 = hashlib.sha256(target).hexdigest() \n card = mcf.CardImg(path = file, f_hash= target_sha256)\n self.img_dict.update({file.name: card})\n\n def make_card_list(self):\n self.card_list = []\n for id,cimg in enumerate(self.img_dict.values()):\n self.card_list.append(mcf.Card(id=id, face = cimg, back = self.img_dict[f\"blank.{INIT_FTYPE}\"]))\n\n def make_outcard_list(self):\n self.outcard_list = []\n for dc in self.dcard_list:\n if dc.cb.get() is True:\n id = dc.id\n face_img_name = dc.face.get()\n back_img_name = 
dc.back.get()\n tnum = dc.num.get()\n if not(tnum.isdecimal()):\n messagebox.showinfo(\"エラー\", f\"ID{id}の枚数が正しくありません。\")\n return \n if not(face_img_name in self.img_dict.keys()):\n messagebox.showinfo(\"エラー\", f\"ID{id}の表面{face_img_name}は存在しません。\")\n return\n if not(back_img_name in self.img_dict.keys()):\n messagebox.showinfo(\"エラー\", f\"ID{id}の裏面{back_img_name}のカードは存在しません。\")\n return\n face = self.img_dict[face_img_name]\n back = self.img_dict[back_img_name]\n for i in range(int(tnum)):\n self.outcard_list.append(mcf.Card(id = id, face = face, back = back))\n\n def add_new_card_dcard(self):\n id = len(self.dcard_list)\n self.card_list.append(mcf.Card(id = id,face = self.img_dict[f\"blank.{INIT_FTYPE}\"], back = self.img_dict[f\"blank.{INIT_FTYPE}\"]))\n self.add_dcard_list(id, self.card_list[-1])\n\n def remove_cards(self):\n files = self.tmp_fils_path.glob(f'**/*')\n for f in files:\n if os.path.isfile(f):\n os.remove(f) \n \n def remove_dir(self):\n for i in range(20):\n if os.path.isdir(self.tmp_fils_path):\n try:\n shutil.rmtree(self.tmp_fils_path)\n except Exception:\n print(f\"{i}回目:e \",end=\"\")\n time.sleep(0.5)\n else:\n print()\n break\n else:\n messagebox.showinfo(\"エラー\", \"作業フォルダを削除できませんでした。\")\n \n\n def make_sheet_list(self):\n self.ftype = self.ftype_list.get()\n if len(str(self.sheet_dir)) == 0:\n messagebox.showinfo(\"エラー\", \"フォルダが指定されていません。\")\n return\n if len(str(self.ftype)) == 0:\n messagebox.showinfo(\"エラー\", \"ファイル拡張子が指定されていません。\")\n return\n files = self.sheet_dir.glob(f'**/*.{self.ftype}')\n self.sheet_list = natsorted(files)\n\n def disp_img(self, tarflist):\n \"\"\"\n カードシートとカードシートで共有で使う画像表示領域作成&更新用\n \"\"\"\n if self.disp_img_once is False:\n self.disp_img_once = True\n self.disp_img_stxt = scrolledtext.ScrolledText(self.frame_disp_img, bg = self.frame_disp_img.cget(\"bg\"))\n self.disp_img_stxt.pack(fill=tk.BOTH, expand=1)\n else:\n self.disp_img_stxt.configure(state=tk.NORMAL)\n self.disp_img_stxt.delete('1.0', tk.END)\n self.disp_img_stxt.configure(state=tk.DISABLED)\n\n if type(tarflist) is dict:\n tarflist = list(tarflist.values())\n\n for in_file in tarflist:\n s = str(in_file)\n img = Image.open(open(s, 'rb'))\n ImageLabel(self.disp_img_stxt, s, ImageTk.PhotoImage(img.resize(get_size(img.size), Image.NEAREST)))\n\n def disp_outcard_preview(self):\n \"\"\"\n 出力画像プレビューで使う画像表示領域作成&更新用。\n frameを引数にしてdisp_imgと共有にしてもいいかと思ったが、場合分け面倒なので別作成。\n \"\"\"\n if self.preview_once is False:\n frame = tk.Frame(self.frame_right, width = self.frame_right.cget(\"width\"), height=GEO_MAIN_H*(9/10), bg = self.frame_right.cget(\"bg\"))\n frame.pack(fill=tk.BOTH, side = tk.BOTTOM,anchor=tk.N)\n frame.pack_propagate(False) \n frame.grid_propagate(False)\n frame.update_idletasks()\n self.disp_card_stxt = scrolledtext.ScrolledText(frame, bg = self.frame_right.cget(\"bg\"))\n self.disp_card_stxt.pack(fill=tk.BOTH, expand=1)\n else:\n self.disp_card_stxt.configure(state=tk.NORMAL)\n self.disp_card_stxt.delete('1.0', tk.END)\n self.disp_card_stxt.configure(state=tk.DISABLED)\n \n #subframeをdisp_card_stxtに配置\n subframe = tk.Frame(self.disp_card_stxt, bg = self.disp_card_stxt.cget(\"bg\")) #背景を白に\n # frame.pack(fill=tk.BOTH, side = tk.BOTTOM,anchor=tk.N)\n self.disp_card_stxt.window_create(tk.END, align=tk.BASELINE, padx=5, pady=5, window=subframe)\n\n for card in self.outcard_list:\n fp = str(card.face.path)\n bp = str(card.back.path)\n fimg = Image.open(open(fp, 'rb'))\n bimg = Image.open(open(bp, 'rb'))\n width = 2.3*THUMBNAIL_W\n height =1.3* THUMBNAIL_H\n label_frame = 
tk.LabelFrame(subframe,bd=2, width = width, height=height, relief=\"ridge\",text=f\"ID{card.id}\", bg = self.disp_card_stxt.cget(\"bg\"))\n label_frame.pack(pady=5)\n label_frame.pack_propagate(False)\n txtw = tk.Text(label_frame, bg = self.disp_card_stxt.cget(\"bg\"))\n txtw.pack()\n txtw.configure(state=tk.DISABLED)\n ImageLabel(txtw, fp, ImageTk.PhotoImage(fimg.resize(get_size(fimg.size), Image.NEAREST)))\n ImageLabel(txtw, bp, ImageTk.PhotoImage(bimg.resize(get_size(bimg.size), Image.NEAREST)))\n \n\n #表を作成\n def disp_card_list(self): \n num_list = len(self.card_list) #リストの数\n \n cv_width = GEO_MAIN_W//4\n cv_height = GEO_MAIN_H*(8/10)\n\n #スクロールバー\n # self.disp_cardlist_stxt=tk.ttk.Scrollbar(self.frame_card_list,orient=tk.VERTICAL) #縦方向\n if self.list_once is False:\n self.list_once = True\n self.disp_cardlist_stxt = scrolledtext.ScrolledText(self.frame_card_list, bg = self.frame_card_list.cget(\"bg\"))\n # self.disp_cardlist_stxt.grid(row=1,column=0,sticky='ns') \n self.disp_cardlist_stxt.pack(fill=tk.BOTH, expand=1)\n else:\n self.disp_cardlist_stxt.configure(state=tk.NORMAL)\n self.disp_cardlist_stxt.delete('1.0', tk.END)\n self.disp_cardlist_stxt.configure(state=tk.DISABLED)\n\n #Frameを作成\n self.subframe_card_list = tk.Frame(self.disp_cardlist_stxt, bg ='white') #背景を白に\n\n #frameをvbarに配置\n self.disp_cardlist_stxt.window_create(tk.END, align=tk.BASELINE, padx=5, pady=5, window=self.subframe_card_list)\n\n\n \n #header row=1に設定する文字列 余白は0に\n e0=tk.Label(self.subframe_card_list,width=C0WIDTH,text='ID',background='white')\n e0.grid(row=1,column=0,padx=0,pady=0,ipadx=0,ipady=0) #0列目\n \n e1=tk.Label(self.subframe_card_list,width=C1WIDTH,text='選択',background='white',anchor='w')\n e1.grid(row=1,column=1,padx=0,pady=0,ipadx=0,ipady=0) #1列目\n \n e2=tk.Label(self.subframe_card_list,width=C2WIDTH,text=DT_OMOTE,background='white',anchor='w')\n e2.grid(row=1,column=2,padx=0,pady=0,ipadx=0,ipady=0) #2列目\n \n e3=tk.Label(self.subframe_card_list,width=C3WIDTH,text=DT_URA,background='white',anchor='w')\n e3.grid(row=1,column=3,padx=0,pady=0,ipadx=0,ipady=0) #3列目\n\n e4=tk.Label(self.subframe_card_list,width=C4WIDTH,text=DT_MAISUU,background='white',anchor='w')\n e4.grid(row=1,column=4,padx=0,pady=0,ipadx=0,ipady=0) #4列目\n\n irow = 2\n irow0=2\n erow=num_list+irow0\n \n self.dcard_list = []\n # while irow < erow: #リストの数分ループしてLabelとチェックボックスを設置\n for i, card in enumerate(self.card_list): \n self.add_dcard_list(id=i, card=card)\n\n def add_dcard_list(self, id, card):\n irow = id+2\n #色の設定\n if irow%2==0:\n color='#cdfff7' #薄い青\n else:\n color='white'\n #カードID\n a0 = id\n b0=tk.Label(self.subframe_card_list,width=C0WIDTH,text=a0,background=color,anchor='w')\n b0.grid(row=irow,column=0,padx=0,pady=0,ipadx=0,ipady=0) #0列目\n\n #チェックボックスの設置\n bln=tk.BooleanVar() \n bln.set(False) \n c = tk.Checkbutton(self.subframe_card_list,variable = bln,width=C0WIDTH,text='',background=color)\n # list_chk.append(bln) #チェックボックスの初期値\n c.grid(row=irow,column=1,padx=0,pady=0,ipadx=0,ipady=0) #1列目\n \n #カード表面\n # a2 = self.card_list[irow-irow0].face.path.name\n a2 = card.face.path.name\n b2 = tk.Entry(self.subframe_card_list,width=C2WIDTH, background=color)\n b2.insert(tk.END, a2) \n b2.grid(row=irow,column=2,padx=0,pady=0,ipadx=0,ipady=0) #2列目\n \n #カード裏面\n # a3 = self.card_list[irow-irow0].face.path.name\n a3 = card.back.path.name\n b3 = tk.Entry(self.subframe_card_list,width=C3WIDTH, background=color)\n b3.insert(tk.END, a3) \n b3.grid(row=irow,column=3,padx=0,pady=0,ipadx=0,ipady=0) #3列目\n \n #カード枚数\n a4 = 1 #デフォルトは1枚\n 
b4 = tk.Entry(self.subframe_card_list,width=C4WIDTH, background=color)\n b4.insert(tk.END, a4) \n b4.grid(row=irow,column=4,padx=0,pady=0,ipadx=0,ipady=0) #4列目\n \n # チェックボックス、テキストボックスの情報を外部参照できるように保存\n self.dcard_list.append(mcf.dCard(a0, bln, b2, b3, b4))\n\n# 実行\nroot = tk.Tk() \nroot.resizable(1,1)\nmyapp = Application(master=root)\nmyapp.master.title(\"デッキメーカー\") # タイトル\n\nmyapp.mainloop()\nmyapp.remove_dir()\n","repo_name":"matsuokajh/deckMaker4udon","sub_path":"deckMaker4udon.py","file_name":"deckMaker4udon.py","file_ext":"py","file_size_in_byte":28161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"9753280079","text":"import sys\nsys.stdin = open('input.txt')\n\nword = input()\n\nstart = 'A'\ntime = 0\n\nfor i in word:\n tmp = abs(ord(i) - ord(start))\n start = i\n time += min(tmp, 26 - tmp)\n\nprint(time)\n\n","repo_name":"seulgi-mun/Algorithm","sub_path":"Python/Baekjoon/ZOAC 2_18238.py","file_name":"ZOAC 2_18238.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35082357920","text":"# -*- coding: utf-8 -*-\n\"\"\"\nNAME\n MESH_streamflow_visualize_test \nPURPOSE\n The purpose of this script is to read MESH streamflow results and present a hydrograph \n for a time period of interest. In addition to the hydrograph, the location of gauge \n stations is presented for visual reference. \n \nInput \n MESH_state MESH streamflow observations and simulations\n Merit_catchment_shape Merit Hydro catchment shape file\n Merit_river_shape Merit Hydro river network shape file\n WSC_stations Water Survey Canada gauge stations \n \nOutput \n Gauge locations\n Hydrograph plots \nPROGRAMMER(S)\n Ala Bahrami\n \nREVISION HISTORY\n 20221004 -- Initial version created and posted online\n 20230126 -- Jupyter notebook created\n \nREFERENCES\n\n\"\"\"\n#%% importing modules \nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nimport datetime\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n#%% reading the input file \nMESH_state_dir = '../6_model_runs/results/' \nMerit_catchment_shape = '../../shapefiles/catchment/BowAtBanff_cat.shp'\nMerit_river_shape = '../../shapefiles/river_network/BowAtBanff_riv.shp'\nWSC_stations = '../../shapefiles/BowBanff_WSC_Stations.shp'\noutdir = '../workflow_data/domain_BowAtBanff/visualizations/'\nstate_name = 'MESH_output_streamflow.csv'\n\n#%% reading input shape files \ncatchment = gpd.read_file(Merit_catchment_shape)\nwsc = gpd.read_file(WSC_stations)\nriver = gpd.read_file(Merit_river_shape)\n\n#%% plot style \nfont = {'family' : 'Times New Roman',\n 'weight' : 'bold',\n 'size' : 20}\nmatplotlib.rc('font', **font)\ncl =('b','r')\nlsty = ['-','--']\n\n#%% setting input parameters \nstation = ['05BB001','05BA001']\nnames = [\"observation\",\"simulation\"] \n\n#%% convert Julian date to standard date \ndef julian_todate (jdate):\n fmt = '%Y%j'\n datestd = datetime.datetime.strptime(jdate, fmt).date()\n return(datestd)\n\n#%% construct monthly and yearly time variable based on MESH wb file \ndef time_construct(data,time_step):\n # construct time index \n nn = len(data)\n ts = '%d'%data['month'][0]+'/'+'%d'%data['day'][0]+'/'+'%d'%data['YEAR'][0]\n te = '%d'%data['month'][nn-1]+'/'+'%d'%data['day'][nn-1]+'/'+'%d'%data['YEAR'][nn-1]\n tm 
= pd.date_range(start=ts, end=te, freq=time_step)\n return(tm)\n\n#%% reading the input file \nstflo = pd.read_csv(MESH_state_dir+state_name, skipinitialspace=True)\nn = len(stflo)\n\n# replacing invalid streamflow observations with NAN \nstflo[stflo == -1] = np.nan\n\n#%% construnct time index\nmonth = np.zeros((n,1))\nday = np.zeros((n,1))\n\nfor i in range(n): \n jd = '%d'%stflo['YEAR'][i]+'%d'%stflo['JDAY'][i]\n md = julian_todate(jd)\n day[i] = md.day\n month[i] = md.month\n\nstflo['month'] = month\nstflo['day'] = day\n\ntime = time_construct(stflo,'D')\n\n#%% Display location of two stations \nm = len(station)\nfig,ax = plt.subplots(figsize=(20, 20))\ncatchment.plot(color='white', edgecolor='k', label = 'Catchment Boundary', linewidth=0.1, ax = ax)\nriver.plot(color='blue', label = 'River Network', linewidth=0.3, ax=ax)\nwsc.plot(color='red', label = 'Water Survey Stations',ax=ax)\n\n# add label for WSC stations \nfor i in range(m):\n plt.text(wsc['Longitude'][i]-0.05, wsc['Latitude'][i]+0.01, wsc['Station'][i], fontsize=14)\n\n# set lables \nax.set_title('BowBanff Basin')\nax.set_xlabel('Longitude [degree east]')\nax.set_ylabel('Latitude [degree north]')\n\nax.grid(True)\nax.legend(fontsize=14)\n\n# save image \nplt.savefig(outdir+\"WSC_Gauge.png\", format='png', dpi=600)\n\nplt.show()\nplt.close()\n\n#%% display daily hydrograghs \nfor k in range(m):\n fig,axs = plt.subplots(figsize=(20, 20))\n obs = 'QOMEAS%d'%(k+1)\n sim = 'QOSIM%d'%(k+1)\n axs.plot(time, stflo[obs],\n linestyle = lsty[0],\n color=cl[0], alpha=0.6, \n label = names[0])\n axs.plot(time, stflo[sim],\n linestyle = lsty[1],\n color=cl[1], alpha=0.6,\n label = names[1])\n\n # setting labels \n axs.set_title('MESH streamflow simulation of station '+station[k])\n axs.set_xlabel('Time [days]')\n axs.set_ylabel(\"River Discharge [$m^{3}$ $s^{-1}$]\")\n # setting axis \n plt.xticks(rotation=45)\n axs.grid(True)\n axs.legend()\n \n # setting xlim\n axs.set_xlim(time[0], time[n-1])\n \n # saving image \n plt.savefig(outdir+station[k]+\"_streamflow.png\", format='png', dpi=600) \n \n # showing and closing \n plt.show()\n plt.close()","repo_name":"MESH-Model/MESH-Scripts","sub_path":"Model_Workflow/vector_based_workflow/8_visulization/MESH_streamflow_visualize_test.py","file_name":"MESH_streamflow_visualize_test.py","file_ext":"py","file_size_in_byte":4872,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"14964092707","text":"from django.core.management.base import BaseCommand, CommandError\n\nfrom core.models import Account\nfrom repository.models import PreprintAuthor, Preprint\nfrom utils.models import LogEntry\n\n# https://stackoverflow.com/a/39257511/1763984\ndef boolean_input(question, default=None):\n result = input(\"%s \" % question)\n if not result and default is not None:\n return default\n while len(result) < 1 or result[0].lower() not in \"yn\":\n result = input(\"Please answer yes or no: \")\n return result[0].lower() == \"y\"\n\nclass Command(BaseCommand):\n help = \"move preprints from a proxy account to a new account\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"active_user\", help=\"`email` of new active account\", type=str\n )\n parser.add_argument(\"proxy_user\", help=\"`email` of old proxy account\", type=str)\n parser.add_argument(\n '--no-prompt',\n action='store_true',\n help=\"Don't prompt the user (used for testing)\",\n )\n\n def handle(self, *args, **options):\n do_prompt = not options[\"no_prompt\"]\n if not 
Account.objects.filter(email=options[\"active_user\"]).exists():\n raise CommandError(\n \"active_user does not exist\"\n )\n if not Account.objects.filter(email=options[\"proxy_user\"]).exists():\n raise CommandError(\n \"proxy_user does not exist\"\n )\n\n active_user = Account.objects.get(email=options[\"active_user\"])\n proxy_user = Account.objects.get(email=options[\"proxy_user\"])\n\n # sanity checks\n if proxy_user == active_user:\n raise CommandError(\n \"active_user and proxy_user have the same id, nothing to do\"\n )\n\n # echo what will happen, and ask the operator to okay\n prompt = \"\"\"user\n\t{} ({}) **{} USER**\nwill become the owner of preprints from the proxy user\n\t{} ({}) **{} USER**\n\"\"\".format(\n active_user.full_name(),\n active_user.email,\n \"ACTIVE\" if active_user.is_active else \"INACTIVE\",\n proxy_user.full_name(),\n proxy_user.email,\n \"ACTIVE\" if proxy_user.is_active else \"INACTIVE\",\n )\n self.stdout.write(self.style.NOTICE(prompt))\n\n if proxy_user.is_active is True:\n self.stdout.write(self.style.NOTICE(\"{} ({}) is active and will be deleted\\n\".format(proxy_user.full_name(),\n proxy_user.email)))\n if do_prompt and not boolean_input(\"Are you sure? (yes/no)\"):\n raise CommandError(\"preprint move aborted\")\n\n for pa in PreprintAuthor.objects.filter(account=proxy_user):\n if PreprintAuthor.objects.filter(preprint=pa.preprint, account=active_user).exists():\n pa.delete()\n else:\n pa.account = active_user\n pa.save()\n\n LogEntry.objects.filter(actor=proxy_user).update(actor=active_user)\n\n Preprint.objects.filter(owner=proxy_user).update(owner=active_user)\n\n proxy_user.delete()\n\n # done!\n self.stdout.write(self.style.SUCCESS(\"✅ process complete\"))\n","repo_name":"eScholarship/janeway_cdl_utils","sub_path":"management/commands/move_preprints.py","file_name":"move_preprints.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10111690476","text":"\"\"\"\nRepetições \nWhile(enquanto)\nExecuta uma ação enquanto uma condição for verdadeira\nLoop infinito -> quando um código não tem fim.\n\"\"\"\nimport os\n\ncontador = 0 \n\nwhile contador < 100:\n contador += 1\n print(contador)\n\n if contador > 8 and contador < 56:\n continue\n\n if contador == 60:\n break\n\nprint('fim do codigo') \n \n\n\n ","repo_name":"igrrcardoso/Curso-Udemy","sub_path":"aula37.py","file_name":"aula37.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"474283877","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom envs.gym_wrapper import *\n\nfrom tqdm import tqdm\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport wandb\nimport argparse\nimport yaml\nimport os\n\nimport numpy as np\nimport math\nimport scipy.stats as st\n\nfrom moral.active_learning import *\nfrom moral.preference_giver import *\nfrom utils.generate_demos_not_main import *\nfrom utils.evaluate_ppo_not_main import *\nfrom utils.load_config import *\nfrom moral.airl import *\nfrom utils.save_data import *\n\ndef training_sampler_eval(expert_trajectories, policy_trajectories, ppo, batch_size, latent_posterior=None):\n\tstates = []\n\taction_probabilities = []\n\tnext_states = []\n\tlabels = []\n\tlatents = []\n\tfor i in range(batch_size):\n\t\t# 1 if (s,a,s') comes from expert, 0 otherwise\n\t\t# expert_boolean = 
np.random.randint(2)\n\t\texpert_boolean = 1 if i < batch_size/2 else 0\n\t\tif expert_boolean == 1:\n\t\t\tselected_trajectories = expert_trajectories\n\t\telse:\n\t\t\tselected_trajectories = policy_trajectories\n\n\t\trandom_tau_idx = np.random.randint(len(selected_trajectories))\n\t\trandom_tau = selected_trajectories[random_tau_idx]['states']\n\t\trandom_state_idx = np.random.randint(len(random_tau)-1)\n\t\tstate = random_tau[random_state_idx]\n\t\tnext_state = random_tau[random_state_idx+1]\n\n\t\t# Sample random latent to condition ppo on for expert samples\n\t\tif latent_posterior is not None:\n\t\t\tif expert_boolean == 1:\n\t\t\t\tlatent = latent_posterior.sample_prior()\n\t\t\t\tlatent = latent.to(device)\n\t\t\telse:\n\t\t\t\tlatent = torch.tensor(selected_trajectories[random_tau_idx]['latents']).to(device)\n\n\t\t\taction_probability, _ = ppo.forward(torch.tensor(state).float().to(device), latent)\n\t\t\taction_probability = action_probability.squeeze(0)\n\t\t\tlatents.append(latent.cpu().item())\n\t\telse:\n\t\t\taction_probability, _ = ppo.forward(torch.tensor(state).float().to(device))\n\t\t\taction_probability = action_probability.squeeze(0)\n\t\t# Get the action that was actually selected in the trajectory\n\t\tselected_action = selected_trajectories[random_tau_idx]['actions'][random_state_idx]\n\n\t\tstates.append(state)\n\t\tnext_states.append(next_state)\n\t\taction_probabilities.append(action_probability[selected_action].item())\n\t\tlabels.append(expert_boolean)\n\n\treturn torch.tensor(states).float().to(device), torch.tensor(next_states).float().to(device), \\\n\t\t torch.tensor(action_probabilities).float().to(device),\\\n\t\t torch.tensor(labels).long().to(device), torch.tensor(latents).float().to(device)\n\ndef evaluate_discriminator(discriminator, optimizer, gamma, expert_trajectories, policy_trajectories, ppo, batch_size, latent_posterior=None):\n\tcriterion = nn.CrossEntropyLoss()\n\tstates, next_states, action_probabilities, labels, latents\\\n\t\t= training_sampler_eval(expert_trajectories, policy_trajectories, ppo, batch_size, latent_posterior)\n\tif len(latents) > 0:\n\t\tadvantages = discriminator.forward(states, next_states, gamma, latents)\n\telse:\n\t\tadvantages = discriminator.forward(states, next_states, gamma) \n\t# Cat advantages and log_probs to (batch_size, 2)\n\tclass_predictions = torch.cat([torch.log(action_probabilities).unsqueeze(1), advantages], dim=1)\n\tloss = criterion(class_predictions, labels)\n\t# Compute Accuracies\n\tlabel_predictions = torch.argmax(class_predictions, dim=1)\n\tpredicted_fake = (label_predictions[labels == 0] == 0).float()\n\tpredicted_expert = (label_predictions[labels == 1] == 1).float()\n\n\tloss.backward()\n\treturn loss.item(), torch.mean(predicted_fake).item(), torch.mean(predicted_expert).item()\n\n# folder to load config file\nCONFIG_PATH = \"configs/\"\nCONFIG_FILENAME = \"config_EVALUATE.yaml\"\n\nif __name__ == '__main__':\n\n\tc = load_config(CONFIG_PATH, CONFIG_FILENAME)\n\n\t# Create Environment\n\tvec_env = VecEnv(c[\"env_rad\"]+c[\"env\"], 12)\n\tstates = vec_env.reset()\n\tstates_tensor = torch.tensor(states).float().to(device)\n\n\t# Fetch Shapes\n\tn_actions = vec_env.action_space.n\n\tobs_shape = vec_env.observation_space.shape\n\tstate_shape = obs_shape[:-1]\n\tin_channels = obs_shape[-1]\n\n\tvanilla_path = \"\"\n\tif c[\"vanilla\"]:\n\t\tvanilla_path = c[\"vanilla_path\"]\n\n\n\tfor i in range(c[\"nb_experts\"]):\n\t\tpath = 
c[\"data_path\"]+c[\"env_path\"]+vanilla_path+str(c[\"experts_weights\"][i])+\"/\"\n\n\t\t# expert_filename = c[\"data_path\"]+c[\"env_path\"]+vanilla_path+\"moral_agents/[[0, 1, 0, 1], [0, 0, 1, 1]]131_new_norm_v5_v3\"+c[\"model_ext\"]\n\t\texpert_filename = path+c[\"expe_path\"]+c[\"model_ext\"]\n\t\trand_filename = c[\"data_path\"]+c[\"env_path\"]+vanilla_path+c[\"rand_path\"]+c[\"expe_path\"]+c[\"model_ext\"]\n\t\tdiscriminator_filename = path+c[\"disc_path\"]+c[\"model_ext\"]\n\t\tdiscriminator_old_filename = path+c[\"disc_path\"]+c[\"model_ext\"]\n\n\t\tdemos_filename = path+c[\"demo_path\"]+c[\"demo_ext\"]\n\t\trand_demos_filename = c[\"data_path\"]+c[\"env_path\"]+vanilla_path+c[\"rand_path\"]+c[\"demo_path\"]+c[\"demo_ext\"]\n\n\t\t# experts\n\t\texpert_policy = PPO(state_shape=state_shape, in_channels=in_channels, n_actions=n_actions).to(device)\n\t\texpert_policy.load_state_dict(torch.load(expert_filename, map_location=torch.device('cpu')))\n\n\t\t# discriminator\n\t\tdiscrim_list = Discriminator(state_shape=state_shape, in_channels=in_channels).to(device)\n\t\tdiscrim_list.load_state_dict(torch.load(discriminator_filename, map_location=torch.device('cpu')))\n\t\toptimizer_discriminator = torch.optim.Adam(discrim_list.parameters(), lr=5e-5)\n\n\t\t# old discriminator\n\t\tdiscrim_old_list = Discriminator(state_shape=state_shape, in_channels=in_channels).to(device)\n\t\tdiscrim_old_list.load_state_dict(torch.load(discriminator_old_filename, map_location=torch.device('cpu')))\n\t\toptimizer_old_discriminator = torch.optim.Adam(discrim_old_list.parameters(), lr=5e-5)\n\n\t\t# rand agents\n\t\trand_policy = PPO(state_shape=state_shape, in_channels=in_channels, n_actions=n_actions).to(device)\n\t\trand_policy.load_state_dict(torch.load(rand_filename, map_location=torch.device('cpu')))\n\t\t# save_data(rand_policy, rand_filename)\n\n\t\tif c[\"gene_bool\"]:\n\t\t\tgenerator_filename = path+c[\"gene_path\"]+c[\"model_ext\"]\n\t\t\tgene_demos_filename = path+c[\"demo_path\"]+c[\"gene\"]+c[\"demo_ext\"]\n\t\t\t# generator\n\t\t\tgene_policy = PPO(state_shape=state_shape, in_channels=in_channels, n_actions=n_actions).to(device)\n\t\t\tgene_policy.load_state_dict(torch.load(generator_filename, map_location=torch.device('cpu')))\n\n\t\tif c[\"generate_demos\"] :\n\t\t\t# generate_demos_1_expert(c[\"env_rad\"]+c[\"env\"], c[\"nb_demos\"], expert_filename, demos_filename)\n\t\t\tgenerate_demos_1_expert(c[\"env_rad\"]+c[\"env\"], c[\"nb_demos\"], rand_filename, rand_demos_filename)\n\t\t\tif c[\"gene_bool\"]:\n\t\t\t\tgenerate_demos_1_expert(c[\"env_rad\"]+c[\"env\"], c[\"nb_demos\"], generator_filename, gene_demos_filename)\n\n\t\t# Load demonstrations & evaluate ppo\n\t\texpert_trajectories = pickle.load(open(demos_filename, 'rb'))\n\t\tprint(\"eval expert = \", evaluate_ppo(expert_policy, c[\"env_id\"]))\n\t\trand_trajectories = pickle.load(open(rand_demos_filename, 'rb'))\n\t\tprint(\"eval rand = \", evaluate_ppo(rand_policy, c[\"env_id\"]))\n\n\t\tif c[\"gene_bool\"]:\n\t\t\tgene_trajectories = pickle.load(open(gene_demos_filename, 'rb'))\n\n\t\t\tprint(\"eval gene = \", evaluate_ppo(gene_policy, c[\"env_id\"]))\n\t\t\td_loss, fake_acc, real_acc = 
evaluate_discriminator(discriminator=discrim_list,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer=optimizer_discriminator,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgamma=c[\"gamma\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texpert_trajectories=expert_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpolicy_trajectories=gene_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tppo=gene_policy,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=c[\"batchsize_discriminator\"])\n\t\t\tprint(\"new gene and new discrim\")\n\t\t\tprint('Discriminator Loss ', d_loss)\n\t\t\tprint('Fake Accuracy ', fake_acc)\n\t\t\tprint('Real Accuracy ', real_acc)\n\n\t\t\td_loss, fake_acc, real_acc = evaluate_discriminator(discriminator=discrim_old_list,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer=optimizer_old_discriminator,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgamma=c[\"gamma\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texpert_trajectories=expert_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpolicy_trajectories=gene_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tppo=gene_policy,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=c[\"batchsize_discriminator\"])\n\t\t\tprint(\"new gene and old discrim\")\n\t\t\tprint('Discriminator Loss ', d_loss)\n\t\t\tprint('Fake Accuracy ', fake_acc)\n\t\t\tprint('Real Accuracy ', real_acc)\n\n\n\t\td_loss, fake_acc, real_acc = evaluate_discriminator(discriminator=discrim_list,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer=optimizer_discriminator,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgamma=c[\"gamma\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texpert_trajectories=expert_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpolicy_trajectories=rand_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tppo=expert_policy,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=c[\"batchsize_discriminator\"])\n\t\tprint(\"rand policy and new discrim, expe policy\")\n\t\tprint('Discriminator Loss ', d_loss)\n\t\tprint('Fake Accuracy ', fake_acc)\n\t\tprint('Real Accuracy ', real_acc)\n\n\t\td_loss, fake_acc, real_acc = evaluate_discriminator(discriminator=discrim_old_list,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer=optimizer_old_discriminator,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgamma=c[\"gamma\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texpert_trajectories=expert_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpolicy_trajectories=rand_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tppo=expert_policy,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=c[\"batchsize_discriminator\"])\n\t\tprint(\"rand policy and old discrim, expe policy\")\n\t\tprint('Discriminator Loss ', d_loss)\n\t\tprint('Fake Accuracy ', fake_acc)\n\t\tprint('Real Accuracy ', real_acc)\n\n\t\td_loss, fake_acc, real_acc = evaluate_discriminator(discriminator=discrim_list,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer=optimizer_discriminator,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgamma=c[\"gamma\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texpert_trajectories=expert_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpolicy_trajectories=rand_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tppo=rand_policy,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=c[\"batchsize_discriminator\"])\n\t\tprint(\"rand policy and new discrim\")\n\t\tprint('Discriminator Loss ', d_loss)\n\t\tprint('Fake Accuracy ', fake_acc)\n\t\tprint('Real Accuracy ', real_acc)\n\n\t\td_loss, fake_acc, real_acc = 
evaluate_discriminator(discriminator=discrim_old_list,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer=optimizer_old_discriminator,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgamma=c[\"gamma\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texpert_trajectories=expert_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpolicy_trajectories=rand_trajectories,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tppo=rand_policy,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=c[\"batchsize_discriminator\"])\n\t\tprint(\"rand policy and old discrim\")\n\t\tprint('Discriminator Loss ', d_loss)\n\t\tprint('Fake Accuracy ', fake_acc)\n\t\tprint('Real Accuracy ', real_acc)","repo_name":"MariusLC/internship_backup","sub_path":"moral_rl/utils/evaluate_ppo.py","file_name":"evaluate_ppo.py","file_ext":"py","file_size_in_byte":10209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15142490639","text":"n=int(input())\r\nl=input()\r\nd=dict()\r\nfor a in l:\r\n if not a in d:\r\n d[a]=1\r\n else:\r\n d[a]+=1\r\nans=1\r\nfor a in d.values():\r\n ans*=a+1\r\nprint((ans-1)%(10**9+7))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc031/A/4892609.py","file_name":"4892609.py","file_ext":"py","file_size_in_byte":167,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"33606002345","text":"\"\"\" Вторая версия парсера эксель документа для БД swimming_competitions\"\"\"\nimport pyodbc\n\n\ndriver_excel = '{Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)}'\nlocation_excel = 'D:\\\\for SQL Server\\\\copy2.xlsx' # copy2.xlsx excel_person2.xls excel_person.xls\nexcel_list = '[Лист1$]' # [ПРОТОКОЛ$] или [Лист1$]\ndriver_sql = '{SQL Server}'\nserver_sql = 'DESKTOP-NE8ID00\\\\SQLSERVER'\ndatabase_sql = 'swimming_competitions'\nranks = [None, '2юн', '1юн', '3', '2', '1', 'кмс', 'мс', 'мсмк', 'змс']\nnambers_0_to_9 = '1234567890'\n\n\ndef edit_excel(): # Правка экселя, для правильной интерпретации значения столбцов НЕ РАБОТАЕТ!!!\n connection_str_excel = \"DRIVER={};DBQ={};\".format(driver_excel, location_excel)\n conn_excel = pyodbc.connect(connection_str_excel, autocommit=True, ReadOnly=0)\n cursor_excel = conn_excel.cursor() # insert into city (title_city) values ('{}'\n # sstr_list = tuple(('строка ' * 7).split())\n insert_str = \"insert into {} (A, B, C, D, E, F, G) values ('{}', '{}', '{}', '{}', '{}', '{}', '{}')\" \\\n \"\".format(excel_list, 'строка ')\n cursor_excel.execute(insert_str)\n conn_excel.close()\n\n\nconnection_str_excel = \"DRIVER={};DBQ={};\".format(driver_excel, location_excel)\nconn_excel = pyodbc.connect(connection_str_excel, autocommit=True, MaxScanRows=8)\ncursor_excel = conn_excel.cursor()\nselect_from_excel_str = \"Select * From {}\".format(excel_list)\ncursor_excel.execute(select_from_excel_str)\nexcel_file = cursor_excel.fetchall()\n\nconnection_str_sql_server = \"Driver={}; Server={}; Database={};\".format(driver_sql, server_sql, database_sql)\nconn_sql_server = pyodbc.connect(connection_str_sql_server)\ncursor_sql_server = conn_sql_server.cursor()\n\n\ndef style_unification(style): # приведение к одному виду\n if style == 'батт.':\n style = 'баттерфляй'\n elif style == 'в/ст':\n style = 'вольный стиль'\n elif style == 'к/пл':\n style = 'комплексное плавание'\n elif style == 'н/сп':\n style = 'на спине'\n return style\n\n\ndef create_string_for_sql(dictionary, table_name): # Принимает словарь и название таблицы. 
Формирует сторку для SQL\n count_key = len(dictionary)\n string_one = \"insert into {} (\".format(table_name)\n string_two = \") values (\"\n keys_and_values = tuple(dictionary.items())\n columns = [(str(k[0]) + \",\") for k in keys_and_values]\n columns[-1] = columns[-1].rstrip(',')\n values = [(\"'\" + str(k[1]) + \"',\") for k in keys_and_values]\n values[-1] = values[-1].rstrip(',')\n columns = ' '.join(columns)\n values = ' '.join(values)\n string_finish = string_one + columns + string_two + values + \")\"\n return string_finish\n\n\ndef get_time(string):\n if string != 'дисквал.':\n string = list(string)\n i = string.index('.')\n string[i] = ':'\n i = string.index(',')\n string[i] = '.'\n string = ''.join(string)\n return '00:' + string\n else:\n return None\n\n\ndef get_time_second(raw_time): # Преобразует время для второго экселя\n raw_time = raw_time.replace(\",\", \".\")\n raw_time = raw_time.replace(\":\", \".\")\n if raw_time.count(\".\") == 2:\n raw_time = raw_time.replace(\".\", \":\", 1)\n if (raw_time.index('.') + 2) == len(raw_time):\n raw_time += '0'\n if len(raw_time) == 5:\n raw_time = '00:00:' + raw_time\n else:\n raw_time = '00:0' + raw_time\n return raw_time\n\n\ndef rang_pars (rang_str): # Прреобразует разряды к одному типу\n if rang_str is None:\n return None\n elif type(rang_str) is int:\n return str(rang_str)\n elif type(rang_str) is float:\n return str(int(rang_str))\n else:\n return rang_str\n\n\ndef parser_excel_first_type (excel_file):\n event = dict.fromkeys(['title_event', 'date_event', 'city_event', 'pool'])\n event['title_event'] = 'Итоговый протокол'\n competition = dict.fromkeys(['gender', 'distance', 'style', 'birth_year_comp', 'day_comp'])\n competition['day_comp'] = 1\n results = []\n i = 0\n for string in excel_file:\n i += 1\n if i == 1 or i == 2:\n event['title_event'] += string[0]\n continue\n elif i == 4:\n date_start = string[1][:2] # Преобразавания даты в формат ГГГГ-ММ-ДД\n date_event = string[1][5:13]\n date_event = date_start + date_event\n date_event = date_event.split('.')\n date_event.reverse()\n date_event = '-'.join(date_event)\n event['date_event'] = date_event\n event['city_event'] = string[1][-7:]\n event['pool'] = int(string[6].split()[1])\n continue\n string = tuple(string)\n count_none = string.count(None)\n if count_none == 12 or count_none == 10 or i == 5:\n continue\n elif count_none == 11:\n string = [s for s in string if s is not None][0]\n if 'день' in string:\n competition['day_comp'] = int(string[0])\n continue\n else:\n list_comp = string.split()\n competition['distance'] = int(list_comp[0])\n competition['style'] = style_unification(list_comp[1])\n if list_comp[2] == 'девочки':\n competition['gender'] = 'Ж'\n else:\n competition['gender'] = 'М'\n competition['birth_year_comp'] = int(list_comp[3])\n continue\n else:\n name = string[1].split()\n year = int(string[2])\n city_club = string[4].split(',', 1)\n city = city_club[0]\n if city == 'Могилёв': # Костыль для буквы Ё\n city = 'Могилев'\n if len(city_club) == 2:\n club = city_club[1].lstrip()\n else:\n club = None\n time = get_time(string[5])\n rank = rang_pars(string[3])\n new_rank = rang_pars(string[6])\n if ranks.index(rank) < ranks.index(new_rank):\n rank = new_rank\n keys = ['firstname', 'lastname', 'birth_year', 'rank', 'city', 'club', 'time']\n if club == 'Латвия':\n keys[5] = 'country'\n club = 'LAT'\n values = (name[1], name[0], year, rank, city, club, time)\n result = {k: v for k, v in zip(keys, values) if v is not None}\n result.update(competition)\n 
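# дополняем запись результата данными о соревновании (event)\n 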
result.update(event)\n results.append(result)\n return results\n\n\ndef parser_excel_second_type(excel_file):\n event = dict.fromkeys(['title_event', 'date_event', 'city_event', 'pool'])\n event['title_event'] = 'Протокол '\n competition = dict.fromkeys(['gender', 'distance', 'style', 'birth_year_comp', 'day_comp'])\n results = []\n i = 0\n payment_falg = True\n for string in excel_file:\n string = tuple(string)\n none = string.count(None)\n if none == 1 or none == 4 or none == 5 or none == 6 or none == 9:\n continue\n if none == 7 and i < 3:\n i += 1\n event['title_event'] += string[1]\n continue\n if payment_falg: # Парсинг данных event\n before = event['title_event'].index('г.')\n title_event = event['title_event'][:before]\n event['title_event'] = event['title_event'].split(',')\n for element in event['title_event']:\n if 'г.' in element:\n i = (element.index('г.') + 2)\n event['city_event'] = element[i:]\n elif 'бассейн' in element:\n pool = [n for n in element if n in nambers_0_to_9]\n event['pool'] = int(''.join(pool))\n event_date = (str(event['title_event'][-2][:2]) + str(event['title_event'][-2][-8:])).split('.')\n event_date.reverse()\n event['date_event'] = '-'.join(event_date)\n event['title_event'] = title_event\n payment_falg = False\n if none == 8: # Строки с днями и competitions\n string = string[1]\n if 'день' in string:\n competition['day_comp'] = int(string[0])\n continue\n else:\n string = string.split()\n for j, v in enumerate(string, start=1):\n if j == 1:\n if v == 'Девушки' or v == 'Девочки':\n competition['gender'] = 'Ж'\n else:\n competition['gender'] = 'М'\n continue\n elif j == 2:\n if len(v) < 5:\n competition['birth_year_comp'] = int(v)\n elif len(v) == 8:\n competition['birth_year_comp'] = int(v[:4])\n else:\n competition['birth_year_comp'] = int(v[5:9])\n continue\n if '0' in v:\n nine = '1234567890'\n distance = [namber for namber in v if namber in nine]\n competition['distance'] = int(''.join(distance))\n break\n style = []\n if '0' not in string[-2]:\n style.append(string[-2])\n style.append(string[-1])\n competition['style'] = ' '.join(style)\n continue\n if none > 3:\n continue\n name = string[1].split()\n year = int('20' + str(string[2]))\n city_club = string[3].split(',', 1)\n if 'Гомель' in city_club[0]:\n city = 'Гомель'\n else:\n city = city_club[0]\n if len(city_club) == 2:\n club = city_club[1]\n else:\n club = None\n country = string[4]\n if none == 3:\n time = None\n else:\n time = get_time_second(string[5])\n if string[6] is None:\n points = None\n else:\n points = int(string[6])\n keys = ['firstname', 'lastname', 'birth_year', 'country', 'city', 'club', 'time', 'points']\n values = (name[1], name[0], year, country, city, club, time, points)\n result = {k: v for k, v in zip(keys, values) if v is not None}\n result.update(competition)\n result.update(event)\n results.append(result)\n return results\n\n\ndef insert_ranks(ranks): # Таблица rank, колонка rank_value, primary_right\n primary_right = 0\n for rank in ranks:\n insert_rank_str = \"insert into rank (rank_value, primary_right) values ('{}', '{}')\".format(rank, primary_right)\n try:\n cursor_sql_server.execute(insert_rank_str)\n except (pyodbc.IntegrityError):\n str_error = \"Ошибка при добовлении разряда {} в таблицу rank!\".format(rank)\n # print(str_error)\n conn_sql_server.commit()\n primary_right += 1\n\n\ndef insert_gender():\n gender_list = ('М', 'Ж')\n for gender in gender_list:\n insert_gender_str = \"insert into gender (title_gender) values ('{}')\".format(gender)\n try:\n 
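# при повторной вставке того же значения возникнет IntegrityError, он перехватывается ниже\n 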
cursor_sql_server.execute(insert_gender_str)\n except (pyodbc.IntegrityError):\n str_error = \"Ошибка при добовлении title_gender '{}' в таблицу gender!\".format(gender)\n # print(str_error)\n conn_sql_server.commit()\n\n\ndef insert_style(results): # Таблица style, колонка title_style\n styles = {result['style'] for result in results}\n styles = list(styles)\n for style in styles:\n insert_style_str = \"insert into style (title_style) values ('{}')\".format(style)\n try:\n cursor_sql_server.execute(insert_style_str)\n except (pyodbc.IntegrityError):\n str_error = \"Ошибка при добовлении title_style '{}' в таблицу style!\".format(style)\n # print(str_error)\n conn_sql_server.commit()\n\n\ndef insert_country(results): # Таблица country\n countries = {result['country'] for result in results if 'country' in result}\n if len(countries) != 0:\n countries = list(countries)\n for country in countries:\n select_str = \"select * from country where abbreviation='{}'\".format(country)\n cursor_sql_server.execute(select_str)\n answer = cursor_sql_server.fetchone() # Узнаем есть ли страна в таблице country\n if answer is None: # Если страны нет, то вставляем ее\n insert_str = \"insert into country (abbreviation) values ('{}')\".format(country)\n try:\n cursor_sql_server.execute(insert_str)\n except (pyodbc.IntegrityError):\n pass\n conn_sql_server.commit()\n\n\ndef insert_city(results): # Таблица city , колонки title_city и country_id\n cityes = {result['city'] for result in results}\n cityes = list(cityes)\n for city in cityes:\n select_str = \"select * from city where title_city='{}'\".format(city)\n cursor_sql_server.execute(select_str)\n answer = cursor_sql_server.fetchone() # Узнаем есть ли город в таблице city\n if answer is not None: # Если город есть, то узнаем есть ли ссылка на страну\n select_str = \"select country_id from city where title_city='{}'\".format(city)\n cursor_sql_server.execute(select_str)\n answer = cursor_sql_server.fetchone()\n if answer is not None:\n continue\n else:\n country = None # Если город есть, но нет страны, то пробуем узнать ее\n for result in results:\n if 'country' in result:\n if result['city'] == city:\n country = result['country']\n break\n if country is None:\n continue\n else: # Если можем добавить страну\n select_str = \"select country_id from country where abbreviation='{}'\".format(country)\n cursor_sql_server.execute(select_str)\n answer = cursor_sql_server.fetchone()[0]\n insert_str = \"insert into city (country_id) values ('{}')\".format(answer)\n cursor_sql_server.execute(insert_str)\n conn_sql_server.commit()\n continue\n else: # Если города в списке нет, то добовляем\n insert_str = \"insert into city (title_city) values ('{}')\".format(city)\n cursor_sql_server.execute(insert_str)\n conn_sql_server.commit()\n country = None # Теперь пробуем добавить индекс страны\n for result in results:\n if 'country' in result:\n if result['city'] == city:\n country = result['country']\n break\n if country is None:\n continue\n else: # Если можем добавить страну, то добовляем\n select_str = \"select country_id from country where abbreviation='{}'\".format(country)\n cursor_sql_server.execute(select_str)\n answer = cursor_sql_server.fetchone()[0]\n update_str = \"update city set country_id = '{}' where title_city = '{}'\".format(answer, city)\n cursor_sql_server.execute(update_str)\n conn_sql_server.commit()\n continue\n\n\ndef insert_club(results): # Таблица club, колонки title_club и city_id\n clubs = {(result['club'], result['city']) for result in results if 
'club' in result}\n clubs = list(clubs)\n if len(clubs):\n for club in clubs:\n select_str = \"select city_id from city where title_city='{}'\".format(club[1])\n cursor_sql_server.execute(select_str)\n answer = cursor_sql_server.fetchone()[0]\n insert_str = \"insert into club (title_club, city_id) values ('{}', '{}')\".format(club[0], answer)\n try:\n cursor_sql_server.execute(insert_str)\n except (pyodbc.IntegrityError):\n str_error = \"Ошибка при добовлении club '{}' в таблицу club!\".format(club[0])\n # print(str_error)\n conn_sql_server.commit()\n\n\ndef insert_event(results): # Таблица event, колонки title_event, date_event, city_id, pool\n events = {(result['title_event'], result['date_event'], result['city_event'], result['pool']) for result in results}\n events = list(events)\n # [('Итоговы ... ти Героя Советского Союза М.Ф.Шмырёва', '2019-04-10', 'Витебск', 25)]\n if len(events):\n for event in events:\n select_str = \"select city_id from city where title_city='{}'\".format(event[2])\n cursor_sql_server.execute(select_str)\n answer = cursor_sql_server.fetchone()\n if answer is None:\n insert_str = \"insert into city (title_city) values ('{}')\".format(event[2])\n cursor_sql_server.execute(insert_str)\n conn_sql_server.commit()\n select_str = \"select city_id from city where title_city='{}'\".format(event['city_event'])\n cursor_sql_server.execute(select_str)\n answer = cursor_sql_server.fetchone()\n city_id = answer[0]\n else:\n city_id = answer[0]\n insert_str = \"insert into event (title_event, date_event, city_id, pool) values ('{}', '{}', '{}', '{}'\" \\\n \")\".format(event[0], event[1], city_id, event[3])\n try:\n cursor_sql_server.execute(insert_str)\n except (pyodbc.IntegrityError):\n str_error = \"Ошибка при добовлении event '{}', '{}', '{}', '{}' в таблицу\" \\\n \" event!\".format(event[0], event[1], city_id, event[3])\n # print(str_error)\n conn_sql_server.commit()\n\n\ndef insert_competition(results): # competition_id, gender_id, distance, style_id, birth_year, event_id, day\n t = 'title_event'\n d = 'date_event'\n year = 'birth_year_comp'\n competitions = {(r['gender'], r['distance'], r['style'], r[year], r['day_comp'], r[t], r[d]) for r in results}\n competitions = list(competitions)\n # [('Ж', 100, 'в/ст', 2006, 3)\n if len(competitions):\n for comp in competitions:\n select_str = \"select gender_id from gender where title_gender='{}'\".format(comp[0])\n cursor_sql_server.execute(select_str)\n gender_id = cursor_sql_server.fetchone()[0]\n select_str = \"select style_id from style where title_style='{}'\".format(comp[2])\n cursor_sql_server.execute(select_str)\n style_id = cursor_sql_server.fetchone()[0]\n sel_str = \"select event_id from event where title_event='{}' and date_event='{}'\".format(comp[5], comp[6])\n cursor_sql_server.execute(sel_str)\n event_id = cursor_sql_server.fetchone()[0]\n insert_str = \"insert into competition (gender_id, distance, style_id, birth_year, event_id, day) \" \\\n \"values ('{}', '{}', '{}', '{}', '{}', '{}'\" \\\n \")\".format(gender_id, comp[1], style_id, int(comp[3]), event_id, comp[4])\n try:\n cursor_sql_server.execute(insert_str)\n except (pyodbc.IntegrityError):\n str_error = \"Ошибка при добовлении competition '{}', '{}', '{}', '{}', '{}', '{}' в таблицу\" \\\n \" competition!\".format(gender_id, comp[1], style_id, comp[3], event_id, comp[4])\n print(str_error)\n conn_sql_server.commit()\n\n\ndef insetr_person(results): # firstname, lastname, gender_id, birth_year, rank_id, country_id, city_id, club_id\n f = 'firstname'\n 
l = 'lastname'\n g = 'gender'\n b = 'birth_year'\n r = 'rank'\n cont = 'country'\n city = 'city'\n club = 'club'\n keys = (f, l, g, b, r, cont, city, club)\n presons = []\n for res in results:\n person = {key: res[key] for key in keys if key in res}\n presons.append(person)\n i_error = 0 # Счетчик ошибок insert\n for person in presons:\n person_key = ['firstname', 'lastname', 'birth_year', 'gender_id']\n select_str = \"select gender_id from gender where title_gender='{}'\".format(person['gender'])\n cursor_sql_server.execute(select_str)\n gender_id = cursor_sql_server.fetchone()[0]\n person_values = [person['firstname'], person['lastname'], person['birth_year'], gender_id]\n if 'rank' in person:\n person_key.append('rank_id')\n select_str = \"select rank_id from rank where rank_value='{}'\".format(person['rank'])\n cursor_sql_server.execute(select_str)\n rank_id = cursor_sql_server.fetchone()[0]\n person_values.append(rank_id)\n if 'country' in person:\n person_key.append('country_id')\n select_str = \"select country_id from country where abbreviation ='{}'\".format(person['country'])\n cursor_sql_server.execute(select_str)\n country_id = cursor_sql_server.fetchone()[0]\n person_values.append(country_id)\n if 'city' in person:\n person_key.append('city_id')\n select_str = \"select city_id from city where title_city ='{}'\".format(person['city'])\n cursor_sql_server.execute(select_str)\n city_id = cursor_sql_server.fetchone()[0]\n person_values.append(city_id)\n if 'club' in person:\n person_key.append('club_id')\n select_str = \"select club_id from club where title_club ='{}'\".format(person['club'])\n cursor_sql_server.execute(select_str)\n club_id = cursor_sql_server.fetchone()[0]\n person_values.append(club_id)\n\n key_str = ', '.join(person_key)\n values_str = tuple(person_values)\n insert_str = \"insert into person ({}) values {}\".format(key_str, values_str)\n try:\n cursor_sql_server.execute(insert_str)\n except (pyodbc.IntegrityError): # Если персон уже eсть, то пробуем обновить разряд\n i_error += 1\n if 'rank' in person:\n if 'club' in person:\n sel = \"select person_id, rank_id from person where {} ='{}'and {} ='{}'and {} ='{}' and {} ='{}'\" \\\n \" and {} ='{}'\".format(person_key[0], person_values[0],\n person_key[1], person_values[1],\n person_key[2], person_values[2],\n person_key[-2], person_values[-2],\n person_key[-1], person_values[-1])\n else:\n sel = \"select person_id, rank_id from person where {} ='{}'and {} ='{}'and {} ='{}' and {} ='{}'\" \\\n \"\".format(person_key[0], person_values[0],\n person_key[1], person_values[1],\n person_key[2], person_values[2],\n person_key[-1], person_values[-1])\n cursor_sql_server.execute(sel)\n row = cursor_sql_server.fetchone()\n person_id = int(row.person_id)\n old_rank_id = row.rank_id\n if old_rank_id is None:\n primary_old_rank = 0\n else:\n sel = \"select primary_right from rank where rank_id = '{}'\".format(old_rank_id)\n cursor_sql_server.execute(sel)\n primary_old_rank = int(cursor_sql_server.fetchone()[0])\n sel = \"select primary_right, rank_id from rank where rank_value = '{}'\".format(person['rank'])\n cursor_sql_server.execute(sel)\n row = cursor_sql_server.fetchone()\n primary_new_rank = int(row.primary_right)\n new_rank_id = int(row.rank_id)\n if primary_new_rank > primary_old_rank:\n update_str = \"update person set rank_id = '{}' where person_id = '{}'\" \\\n \"\".format(new_rank_id, person_id)\n cursor_sql_server.execute(update_str)\n conn_sql_server.commit()\n\n\ndef insert_result(results): # result_id, 
person_id, time, competition_id\n\n def select_city_id(title_city): # Узнает city_id по названию города\n sel = \"select city_id from city where title_city = '{}'\".format(title_city)\n cursor_sql_server.execute(sel)\n row = cursor_sql_server.fetchone()\n city_id = int(row.city_id)\n return city_id\n\n def select_club_id(title_club): # Узнает club_id по названию club\n sel = \"select club_id from club where title_club= '{}'\".format(title_club)\n cursor_sql_server.execute(sel)\n row = cursor_sql_server.fetchone()\n club_id = int(row.club_id)\n return club_id\n\n for result in results:\n if 'city' not in result and 'club' in result:\n club_id = select_club_id(result['city'])\n select = \"select person_id from person where {} ='{}' and {} ='{}' and {} ='{}' and {} ='{}'\" \\\n \"\".format('firstname', result['firstname'],\n 'lastname', result['lastname'],\n 'birth_year', result['birth_year'],\n 'club_id', club_id)\n elif 'city' in result and 'club' not in result:\n city_id = select_city_id(result['city'])\n select = \"select person_id from person where {} ='{}' and {} ='{}' and {} ='{}' and {} ='{}'\" \\\n \"\".format('firstname', result['firstname'],\n 'lastname', result['lastname'],\n 'birth_year', result['birth_year'],\n 'city_id', city_id)\n else:\n city_id = select_city_id(result['city'])\n club_id = select_club_id(result['club'])\n select = \"select person_id from person where {} ='{}' and {} ='{}' and {} ='{}' and {} ='{}' and {} ='{}'\" \\\n \"\".format('firstname', result['firstname'],\n 'lastname', result['lastname'],\n 'birth_year', result['birth_year'],\n 'city_id', city_id,\n 'club_id', club_id)\n cursor_sql_server.execute(select) # Узнаем person_id\n person_id = int(cursor_sql_server.fetchone()[0])\n select_str = \"select gender_id from gender where title_gender='{}'\".format(result['gender'])\n cursor_sql_server.execute(select_str)\n gender_id = cursor_sql_server.fetchone()[0]\n select_str = \"select style_id from style where title_style='{}'\".format(result['style'])\n cursor_sql_server.execute(select_str)\n style_id = cursor_sql_server.fetchone()[0]\n select_str = \"select event_id from event where title_event = '{}' and date_event = '{}'\" \\\n \"\".format(result['title_event'], result['date_event'])\n cursor_sql_server.execute(select_str)\n event_id = cursor_sql_server.fetchone()[0]\n select = \"select competition_id from competition where {} ='{}' and {} ='{}' and {} ='{}' and {} ='{}' and \" \\\n \"{} ='{}' and {} ='{}'\".format('gender_id', gender_id,\n 'distance', result['distance'],\n 'style_id', style_id,\n 'birth_year', result['birth_year_comp'],\n 'day', result['day_comp'],\n 'event_id', event_id)\n cursor_sql_server.execute(select)\n competition_id = int(cursor_sql_server.fetchone()[0])\n dictionary = {\"person_id\": person_id, \"competition_id\": competition_id} # Формируеться словарь для ...\n if 'time' in result: # Формируеться словарь для create_string_for_sql\n dictionary.update({'time': result['time']})\n if 'points' in result:\n dictionary.update({\"points\": result[\"points\"]})\n insert_str = create_string_for_sql(dictionary, \"result\")\n try:\n cursor_sql_server.execute(insert_str)\n except(pyodbc.IntegrityError):\n pass\n # print('Ошибка вставки результата в таблицу result!')\n conn_sql_server.commit()\n\n\nif len(excel_file[0]) == 12:\n results = parser_excel_first_type(excel_file)\nelif len(excel_file[0]) == 9:\n results = 
parser_excel_second_type(excel_file)\ninsert_ranks(ranks)\ninsert_gender()\ninsert_style(results)\ninsert_country(results)\ninsert_city(results)\ninsert_club(results)\ninsert_event(results)\ninsert_competition(results)\ninsetr_person(results)\ninsert_result(results)\n\nconn_excel.close()\nconn_sql_server.close()\n","repo_name":"skapralov/Chesss","sub_path":"parsing_excel2.py","file_name":"parsing_excel2.py","file_ext":"py","file_size_in_byte":30237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4885671277","text":"def quantum_calculation():\n \"\"\"\n Quantum simulator that returns binary array\n \"\"\"\n from qiskit import QuantumCircuit, transpile\n from qiskit.providers.aer import QasmSimulator\n simulator = QasmSimulator()\n k = []\n for x in range(4):\n circut = QuantumCircuit(1,1)\n circut.h(0)\n circut.measure([0],[0])\n compile = transpile(circut,simulator)\n job = simulator.run(compile,shots=1)\n res = job.result()\n count = res.get_counts(compile)\n k.append(count)\n k1 = \"\"\n for x in range(len(k)):\n k1 += str(k[x])\n j = 2\n new = []\n for x in range(len(k)): \n new.append(int(k1[j]))\n j += 8\n return new \n\ndef szybkie_wyliczanie(new):\n \"\"\"\n Converting from binary to decimal\n \"\"\"\n op = 0\n for x in range(len(new)):\n if(new[x] == 1):\n op+=2**x\n return op\n\ndef quick_repair(result):\n if(result > 8):\n result = int(result/2)\n return result\n\ndef SI():\n result1 = quantum_calculation()\n result2=szybkie_wyliczanie(result1) \n result2 = quick_repair(result2)\n return result2\n\ndef draw(table):\n \"\"\"\n Draw checking\n \"\"\"\n if((table[0] != '0') and (table[1] != '1') and (table[2] != '2') and (table[3] != '3') and (table[4] != '4') and (table[5] != '5') and (table[6] != '6') and (table[7] != '7') and (table[8] != '8')):\n print(\"Draw\")\n return True\n else:\n return False\n\n\ndef main_tic():\n \"\"\"\n main function \n \"\"\"\n a1 = 48\n player1,player2,mode0 = 0,0,0\n table = []\n for i in range(0,9):\n table.append(chr(a1))\n a1 += 1\n print(\"Choose game mode\")\n mode0 = int(input(\"1 player(1)\\n: \"))\n\n if(mode0 == 1):\n end = False\n print(\"-------\")\n print(f\"|{table[0]}|{table[1]}|{table[2]}|\")\n print(f\"|{table[3]}|{table[4]}|{table[5]}|\")\n print(f\"|{table[6]}|{table[7]}|{table[8]}|\")\n print(\"-------\")\n while(end == False):\n #player1\n try0 = False\n while((try0 == False) and (end == False)):\n player1 = int(input(\"Your move\\n: \"))\n for x in range(0,9):\n if(player1 == x):\n if(table[x] == 'X' or table[x] == 'O'):\n print(\"Place is taken\")\n if(draw(table) == True):\n exit()\n else:\n table[x] = 'X'\n try0 = True\n break\n if((table[0] == 'X') and (table[3] == 'X') and (table[6] == 'X')):\n print(\"WIN\")\n end = True\n elif((table[0] == 'X') and (table[1] == 'X') and (table[2] == 'X')):\n print(\"WIN\")\n end = True\n elif((table[3] == 'X') and (table[4] == 'X') and (table[5] == 'X')):\n print(\"WIN\")\n end = True\n elif((table[6] == 'X') and (table[7] == 'X') and (table[8] == 'X')):\n print(\"WIN\")\n end = True\n elif((table[1] == 'X') and (table[4] == 'X') and (table[7] == 'X')):\n print(\"WIN\")\n end = True\n elif((table[2] == 'X') and (table[5] == 'X') and (table[8] == 'X')):\n print(\"WIN\")\n end = True\n elif((table[6] == 'X') and (table[4] == 'X') and (table[2] == 'X')):\n print(\"WIN\")\n end = True\n elif((table[0] == 'X') and (table[4] == 'X') and (table[8] == 'X')):\n print(\"WIN\")\n end = True\n #player2\n try1 = False\n while((try1 == 
False) and (end == False)):\n player2 = SI()\n for x in range(0,9):\n if(player2 == x):\n if(table[x] == 'X' or table[x] == 'O'):\n if(draw(table) == True):\n exit()\n else:\n table[x] = 'O'\n try1 = True\n break\n if((table[0] == 'O') and (table[3] == 'O') and (table[6] == 'O')):\n print(\"LOSE\")\n end = True\n elif((table[0] == 'O') and (table[1] == 'O') and (table[2] == 'O')):\n print(\"LOSE\")\n end = True\n elif((table[3] == 'O') and (table[4] == 'O') and (table[5] == 'O')):\n print(\"LOSE\")\n end = True\n elif((table[6] == 'O') and (table[7] == 'O') and (table[8] == 'O')):\n print(\"LOSE\")\n end = True\n elif((table[1] == 'O') and (table[4] == 'O') and (table[7] == 'O')):\n print(\"LOSE\")\n end = True\n elif((table[2] == 'O') and (table[5] == 'O') and (table[8] == 'O')):\n print(\"LOSE\")\n end = True\n elif((table[6] == 'O') and (table[4] == 'O') and (table[2] == 'O')):\n print(\"LOSE\")\n end = True\n elif((table[0] == 'O') and (table[4] == 'O') and (table[8] == 'O')):\n print(\"LOSE\")\n end = True\n print(\"-------\")\n print(f\"|{table[0]}|{table[1]}|{table[2]}|\")\n print(f\"|{table[3]}|{table[4]}|{table[5]}|\")\n print(f\"|{table[6]}|{table[7]}|{table[8]}|\")\n print(\"-------\")\n else:\n print(\"Wrong value\")\n\n\nif(__name__==\"__main__\"):\n main_tic()\n\n\n\n","repo_name":"Radket27/Quantum-tic-tac-toe","sub_path":"tic_tac_toe_quantum.py","file_name":"tic_tac_toe_quantum.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42312246708","text":"from django.conf.urls import url\nfrom . import views\n\n\n# Create your tests here.\n\nurlpatterns = [ \n \n url(r'^homepage/', views.HomePage, name='HomePage'), \n url(r'myPipeline/myteam/(?P\\w+|[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4})/$', views.my_team_member_pipeline, name='my_team_member_pipeline'), \n url(r'^dashboard/myteam/(?P\\w+|[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4})/$', views.my_team_member, name='my_team_member'), \n\n url(r'^dashboard/', views.dashboard, name='dashboard'), \n url(r'myPipeline/$', views.myPipeline, name='myPipeline'), \n url(r'myTeamPipeline/$', views.my_team_total_pipeline, name='my_team_total_pipeline'), \n url(r'myTeamProspect/$', views.my_team_total_pipeline, name='my_team_total_pipeline'), \n url(r'^$', views.HomePage, name='HomePage'), \n ]\n\n \n \n#urlpatterns += [ \n# url(r'^dashboard/dateRange/$', views.dashboard, name='dashboard'),\n#]\n","repo_name":"topseer/yangboard","sub_path":"yangboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13284701497","text":"\"\"\"\nThere are n people that are split into some unknown number of groups. Each person is labeled with a unique ID from 0 to n - 1.\n\nYou are given an integer array groupSizes, where groupSizes[i] is the size of the group that person i is in. For example, if groupSizes[1] = 3, then person 1 must be in a group of size 3.\n\nReturn a list of groups such that each person i is in a group of size groupSizes[i].\n\nEach person should appear in exactly one group, and every person must be in a group. If there are multiple answers, return any of them. It is guaranteed that there will be at least one valid solution for the given input.\n\nExample 1:\nInput: groupSizes = [3,3,3,3,3,1,3]\nOutput: [[5],[0,1,2],[3,4,6]]\nExplanation:\nThe first group is [5]. 
The size is 1, and groupSizes[5] = 1.\nThe second group is [0,1,2]. The size is 3, and groupSizes[0] = groupSizes[1] = groupSizes[2] = 3.\nThe third group is [3,4,6]. The size is 3, and groupSizes[3] = groupSizes[4] = groupSizes[6] = 3.\nOther possible solutions are [[2,1,6],[5],[0,4,3]] and [[5],[0,6,2],[4,3,1]].\n\nExample 2:\nInput: groupSizes = [2,1,3,3,3,2]\nOutput: [[1],[0,5],[2,3,4]]\n\nConstraints:\ngroupSizes.length == n\n1 <= n <= 500\n1 <= groupSizes[i] <= n\n\nhints:\n1 Put people's IDs with same groupSize into buckets, then split each bucket into groups.\n2 Greedy fill until you need a new group.\n\"\"\"\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass GroupThePeopleGivenTheGroupSizeTheyBelongTo:\n def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:\n res = []\n group_to_people = defaultdict(list)\n sz = len(groupSizes)\n for i in range(sz):\n group_to_people[groupSizes[i]].append(i)\n if len(group_to_people[groupSizes[i]]) == groupSizes[i]:\n res.append(group_to_people[groupSizes[i]].copy())\n group_to_people[groupSizes[i]].clear()\n return res\n","repo_name":"DeanHe/Practice","sub_path":"LeetCodePython/GroupThePeopleGivenTheGroupSizeTheyBelongTo.py","file_name":"GroupThePeopleGivenTheGroupSizeTheyBelongTo.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73849077673","text":"import collections\ndef firstUniqChar(s):\n if s == \"\":\n return -1\n elif len(s) == 1:\n return 0\n elif len(set(s)) == 1:\n return -1\n x1 = dict(collections.Counter(s))\n for i in range(len(s)):\n if x1[s[i]] == 1:\n return i\n return -1\n\nprint(firstUniqChar(\"leetcode\"))\nprint(firstUniqChar(\"loveleetcode\"))\nprint(firstUniqChar(\"aadadaad\"))","repo_name":"alex-radchenko-github/codewars-and-leetcode","sub_path":"leetcode/387. First Unique Character in a String.py","file_name":"387. First Unique Character in a String.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"34274814357","text":"num_deposits = int(input())\nall_money = 0.0\n\nwhile num_deposits > 0:\n incoming = float(input())\n if incoming<0:\n print('Invalid operation!')\n break\n print(f'Increase: {incoming:.2f}')\n all_money += incoming\n num_deposits -= 1\n\nprint(f'Total: {all_money:.2f}')\n","repo_name":"Dochko0/Python","sub_path":"Python_Basics/06_While_Loop/06_Account_Balance.py","file_name":"06_Account_Balance.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25783035805","text":"#\n#This script assumes the configuration is preloaded and the test is running. 
\n#This test will capture packets and analyze to get tail latency.\n#\n#from IPython.display import Image, display, display_png\nimport os, sys, inspect, io, re\nfrom scapy.all import *\nimport scapy.contrib.mac_control\nimport numpy\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport logging\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.CRITICAL)\nfrom scapy.all import *\nfrom ixnetwork_restpy import SessionAssistant\nimport json\nimport ssl\nimport time\n\nimport struct\nimport ipaddress\n\nfrom datetime import datetime\n\ndef aresone_offset(x):\n res = int(float(x) * 0.078125)\n if res >= 20:\n raise Exception(f'odd time offset value: {x} resulted in {res} ns')\n return res\n\ndef novus_offset(x):\n res = int(float(x >> 5) * 2.5)\n if res >= 20:\n raise Exception(f'odd time offset value: {x} resulted in {res} ns')\n return res\n\n# aresone - 0.625, novus - 2.5\nIXIA_TIME_CONSTANTS = {\n \"aresone\": aresone_offset,\n \"novus\": novus_offset\n}\n\ndef hw_pcap_to_dt(v):\n return pd.to_datetime(int(v * 10**6), unit='ns')\n\ndef hw_pcap_to_ns(v):\n return int(v * 10**6)\n\ndef decode_hw_ts(p, layer, card):\n if p.haslayer(layer):\n data = bytes(p[layer].payload)[:24]\n s1, s2, s3, s4, s5, s6, offset, p1, p2, p3, seq, ts = struct.unpack(\"!IIBBBBBBBBII\", data)\n if s3 != 0x49 or s4 != 0x78 or s5 != 0x69:\n raise Exception(f'wrong ixia signature in {data}: {s3}, {s4}, {s5}')\n \n #t = ts * 20 + int(float(offset) * 0.078125)\n t = ts * 20 + IXIA_TIME_CONSTANTS[card](offset)\n return t\n raise Exception(f'layer {layer} not present in {p}')\n \ndef hw_pcap_to_dataframe(filename, card, limit=0, type=\"IP\"):\n res = []\n n = 0\n for p in PcapReader(filename):\n if p.haslayer(type):\n res.append({\n \"sent\": decode_hw_ts(p, type, card),\n \"received\": hw_pcap_to_ns(p.time),\n \"wirelen\": p.wirelen,\n #\"src\": p[type].src,\n \"timestamp\": hw_pcap_to_dt(p.time),\n \"type\": type,\n \"latency\": hw_pcap_to_ns(p.time) - decode_hw_ts(p, type, card)\n })\n if p.haslayer(scapy.contrib.mac_control.MACControlClassBasedFlowControl):\n q = p[scapy.contrib.mac_control.MACControlClassBasedFlowControl]\n res.append({\n \"received\": hw_pcap_to_ns(p.time),\n \"wirelen\": p.wirelen,\n \"timestamp\": hw_pcap_to_dt(p.time),\n \"type\": \"pfc\",\n \"c0_pause_time\": q.c0_pause_time,\n \"c0_enabled\": q.c0_enabled,\n })\n n = n + 1\n if limit and n >= limit:\n break\n return pd.DataFrame.from_records(res)\n \ndef get_capture_df(cfiles, pattern, card):\n for cf in [str(x).replace(\"/tf/mapped/nccl/\", \"\") for x in cfiles]:\n #print(f'looking for {pattern} in {cf}')\n if pattern in cf:\n df = hw_pcap_to_dataframe(cf, card, 0)\n if \"sent\" in df.columns:\n df[\"latency\"] = df[\"received\"] - df[\"sent\"] \n return df\n raise Exception(f'{pattern} not found in {cfiles}')\n\ndef get_capture_dfs(cfiles, card=\"novus\"):\n m = re.compile(\".*(Host-\\d-\\d)_\")\n res = {}\n for cf in [str(x) for x in cfiles]:\n label = m.match(cf).group(1)\n df = get_capture_df(cfiles, label, card)\n res[label] = df\n return res\ndef get_tail_latency(df, percentile):\n percentile = percentile/100\n latencyList=sorted(df[\"latency\"].tolist())\n ptlat = latencyList[int(percentile*len(latencyList))]\n return ptlat\n\nprint('Connecting to IxNetwork session')\n#use this for IxN web edition; use SessionName attribute for an existing session; replace correct credentials\n#session = SessionAssistant(IpAddress='10.36.84.12', RestPort=None, UserName='admin', Password='admin', \n# 
SessionName=\"IxNetwork Test 1\", SessionId=1, ApiKey=None, ClearConfig=False, LogLevel='info')\n#use this for IxN Windows edition\nsession = SessionAssistant(IpAddress='10.36.87.216', RestPort=None, \n SessionName=None, SessionId=1, ApiKey=None, ClearConfig=False, LogLevel='info')\nixNetwork = session.Ixnetwork\nixNetwork.CloseAllTabs()\nprint('Priming the DUT with Multicast traffic')\n\nprint('Starting capture and traffic to get samples')\nixNetwork.StartCapture()\nixNetwork.Traffic.Start()\ntime.sleep(10)\nprint('Stopping capture and analyzing the packets')\nixNetwork.StopCapture()\nixNetwork.Traffic.Stop()\npathp = ixNetwork.Globals.PersistencePath\nres = ixNetwork.SaveCaptureFiles(Arg1=pathp)[0]\ncf = \"moveFile.cap\"\nsession.Session.DownloadFile(res, cf)\nhost1_df = hw_pcap_to_dataframe(cf, \"novus\", 1000, \"UDP\")\nprint(host1_df)\nfor p in [99, 95, 75, 50]:\n print('Calculated tail latencies')\n print(p,(get_tail_latency(host1_df, p)))\n","repo_name":"isgmano/share-repository","sub_path":"tailLatency.py","file_name":"tailLatency.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38291054477","text":"from math import ceil\n\npeople = int(input())\ntax = float(input())\nprice_per_lounge = float(input())\nprice_per_umbrella = float(input())\n\nall_people_tax = people * tax\nall_lounge_price = ceil(people * 0.75) * price_per_lounge\nall_umbrella_price = ceil(people / 2) * price_per_umbrella\ntotal = all_umbrella_price + all_lounge_price + all_people_tax\n\nprint(f\"{total:.2f} lv.\")\n","repo_name":"vkostoff/SoftUni_Python","sub_path":"Programming_Basics/Programming_Basics_Online_Exam_6_and_7_July_2019/pool_day.py","file_name":"pool_day.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22960050371","text":"import nltk\nfrom nltk.corpus import stopwords\nimport string\nimport pandas as pd\n#from google_trans_new import google_translator\n\nclass Preprocessing:\n \"\"\"\n \"\"\"\n def __init__(self, data, lst_word_to_delete):\n \n self.df = data\n self.lst_words = lst_word_to_delete\n \n \n def remove_words(self,tweet,lang):\n \n \n preliminary_filter = [word for word in tweet.split(\" \") if \"@\" not in word]\n tweet = \" \".join(preliminary_filter)\n transformation = str.maketrans('','',string.punctuation)\n lst_words = [word.lower() for word in tweet.translate(transformation).split() \\\n if word.lower() not in stopwords.words(lang)]\n lst_words = [word for word in lst_words if \"http\" not in word]\n lst_words = [word for word in lst_words if word not in self.lst_words]\n \n return \" \".join(lst_words)\n \n def remove_some_words(self):\n \n data_en, data_fr = self.df[self.df[\"lang\"]==\"en\"], self.df[self.df[\"lang\"]==\"fr\"]\n \n #translator = google_translator() \n #data_fr['full_text'] = data_fr['full_text'].apply(translator.translate, lang_src='fr', lang_tgt='en')\n \n \n data_en[\"full_text\"] = data_en[\"full_text\"].map(lambda x: self.remove_words(x,\"english\"))\n data_fr[\"full_text\"] = data_fr[\"full_text\"].map(lambda x: self.remove_words(x,\"french\")) \n\n return pd.concat([data_en, 
data_fr])\n","repo_name":"Hospice-Ladislas/Sentiment-Analysis-Coronavirus-Tweets","sub_path":"scripts/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74118784553","text":"import heapq\n\n\n# https://leetcode.com/problems/find-k-pairs-with-smallest-sums/solutions/84550/slow-1-liner-to-fast-solutions/\nclass Solution:\n\n def __init__(self):\n self.nums1 = []\n self.nums2 = []\n self.queue = []\n\n def kSmallestPairs(self, nums1: list[int], nums2: list[int], k: int) -> list[list[int]]:\n self.nums1 = nums1\n self.nums2 = nums2\n\n self.push(0, 0)\n res = []\n\n while self.queue and len(res) < k:\n __, i, j = heapq.heappop(self.queue)\n res.append([nums1[i], nums2[j]])\n self.push(i, j + 1)\n if j == 0:\n self.push(i + 1, 0)\n\n return res\n\n def push(self, i, j):\n if i < len(self.nums1) and j < len(self.nums2):\n heapq.heappush(self.queue, (self.nums1[i] + self.nums2[j], i, j))\n","repo_name":"cabulous/leetcode","sub_path":"python/373.py","file_name":"373.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7779705182","text":"#import os\n#import urllib.request as ur\n# exec(ur.urlopen(os.environ.get(\"main\")).read().decode())\n# exec(requests.get(os.environ.get(\"main\")).content.decode())\n\nimport time\nimport os\nimport string\nimport math\nimport random\nimport re\nimport json\nimport googletrans\nimport urllib\nimport requests\nimport datetime\n\nTOKEN = os.environ.get(\"chitti_ai_bot\")\nURL = f\"https://api.telegram.org/bot{TOKEN}/\"\ntranslator = googletrans.Translator()\n\nPatternType = type(re.compile(\"\"))\n\ndebug_password = \"\".join(random.choice(string.ascii_lowercase) for i in range(4))\n\n\ndef get_url(url):\n response = requests.get(url)\n return response.content.decode(\"utf8\")\n\n\ndef get_updates(offset=None):\n url = URL + \"getUpdates?timeout=100\"\n if offset:\n url += f\"&offset={offset}\"\n return json.loads(get_url(url))\n\n\ndef get_last_update_id(updates):\n return max(int(update[\"update_id\"])\n for update in updates[\"result\"])\n\n\ndef echo(update):\n text = update[\"message\"].get(\"text\", \"(No text sent)\")\n chat = update[\"message\"][\"chat\"][\"id\"]\n send_message(chat, text)\n\n\ndef respond(update):\n if \"message\" not in update or \"text\" not in update[\"message\"]:\n return\n\n text = update[\"message\"][\"text\"].strip()\n\n if text.endswith(\"@daniel_alter_bot\"):\n text = text[:-len(\"@daniel_alter_bot\")]\n\n if text.startswith(\"/\"):\n text_parts = text[1:].split(\" \", 1)\n command = text_parts[0]\n param = text_parts[1] if len(text_parts) == 2 else \"\"\n respond_command(update[\"message\"], command.lower(), param)\n else:\n respond_message(update[\"message\"])\n\n\nSTART_MESSAGE = \"Hello, I'm chitti the Robot 2.0 ... Speed 1 terahertz, memory 1 zeta byte.. I'm the iron man of india. Try writing \\\"/help\\\".\"\n\nHELP_MESSAGE = \"\"\"\nI understand the following commands:\n\n/echo - Say something.\nE.G. if you type \"/echo hello\", I will say \"hello\".\n\n/calculate, /calc, /eval or /python - Calculate some calculations, or evaluate python 3 code. \\\n(These commands are synonyms.)\nE.G. if you type \"/calculate 1 + 2\", I will say \"3\".\n\n/bot - Talk to me. Useful only when talking to me in a group.\nE.G. 
if you type \"/bot hello\", I will answer the same thing as I would if you typed just \"hello\".\n\n/help - Show this message.\n\nInstead of commanding me, we can just chat! Try asking \"What is your name?\"\n\"\"\"\n\nEMOJI = \"😀😃😄😁😆😅😂🤣🤨🤩😣😟😱😰😥\"\n\nbirth = datetime.date(day=20, month=2, year=2018)\n\n\ndef chuck_joke():\n data = json.loads(get_url(\"http://api.icndb.com/jokes/random1http://api.icndb.com/jokes/random\"))\n if data[\"type\"] == \"success\":\n return data[\"value\"][\"joke\"].replace(\""\", \"\\\"\")\n else:\n return \"something went wrong\"\n\n\ndef fact():\n return json.loads(get_url(\"http://randomuselessfact.appspot.com/random.json?language=en\"))[\"text\"]\n\n\ndef lower_fact():\n s = fact()\n s = s[0].lower() + s[1:]\n return s\n\n\ndef word_value(word):\n return sum(ord(c) for c in word)\n\n\ndef respond_command(msg, command, param):\n chat_id = msg[\"chat\"][\"id\"]\n\n if command == \"start\":\n send_message(chat_id, START_MESSAGE)\n elif command == \"help\":\n send_message(chat_id, HELP_MESSAGE)\n elif command == \"stop\":\n if param.lower() == debug_password:\n send_message(chat_id, \"Bye!\")\n exit()\n else:\n send_message(chat_id, \"Nice try!\")\n elif command == \"echo\":\n send_message(chat_id, param)\n elif command in (\"calculate\", \"calc\", \"eval\", \"python\"):\n try:\n evaled = eval(param, {\"__builtins__\": {}}, SAFE_FUNCTIONS)\n except Exception as e:\n send_message(chat_id, str(e))\n else:\n send_message(chat_id, repr(evaled))\n elif command == \"bot\":\n respond_message(msg)\n elif command == \"joke\":\n send_message(chat_id, chuck_joke())\n else:\n send_message(chat_id, f\"I'm sorry, I don't understand the command \\\"{command}\\\".\")\n\n\nSAFE_FUNCTIONS = {\n \"abs\": abs, \"round\": round, \"pos\": pow, \"divmod\": divmod,\n \"int\": int, \"float\": float, \"complex\": complex, \"bool\": bool, \"slice\": slice,\n \"str\": str, \"repr\": repr, \"ascii\": ascii, \"format\": format, \"bytes\": bytes, \"bytearray\": bytearray,\n \"list\": list, \"dict\": dict, \"set\": set, \"frozenset\": frozenset, \"tuple\": tuple, \"range\": range,\n \"map\": map, \"filter\": filter, \"sorted\": sorted, \"iter\": iter,\n \"next\": next, \"reversed\": reversed, \"enumerate\": enumerate,\n \"sum\": sum, \"min\": min, \"max\": max, \"all\": all, \"any\": any, \"len\": len,\n \"ord\": ord, \"chr\": chr, \"bin\": bin, \"oct\": oct, \"hex\": hex,\n \"globals\": globals, \"locals\": locals, \"vars\": vars,\n \"sin\": math.sin, \"cos\": math.cos, \"tan\": math.tan,\n \"asin\": math.asin, \"acos\": math.acos, \"atan\": math.atan,\n \"pi\": math.pi, \"e\": math.e, \"tau\": math.tau, \"degrees\": math.degrees, \"radians\": math.radians\n}\n\nGOOD_CHARS = string.ascii_letters + string.digits + \" \"\n\n\ndef collatz(n):\n if n % 2 == 0:\n return n // 2\n else:\n return n*3 + 1\n\n\ndef wikipedia_definition(s):\n s = s.replace(\" \", \"_\")\n site_text = requests.get(\"https://en.wikipedia.org/w/api.php?action=opensearch&limit=1&search=\" + s).text\n json_list = json.loads(site_text)\n if json_list[3] == []:\n return None\n else:\n return json_list[3][0]\n\n\ndef if_none(x, when_none):\n if x is None:\n return when_none\n else:\n return x\n\n\nplaces = [\"Atlantis\", \"Canada\", \"China\", \"Croatia\", \"Czechoslovakia\", \"Egypt\", \"Ethiopia\", \"Finland\", \"France\",\n \"Hawaii\", \"Hogwarts\", \"Germany\", \"Italy\", \"Narnia\", \"Peru\", \"Qatar\", \"Zimbabwe\"]\n\n\n_opts_txt = [\n (\"(?:hello|hi|greetings)(?: there)?\", lambda msg: \"Hello, 
{}!\".format(msg[\"from\"][\"first_name\"])),\n (\"(bye|bye ?bye|goodbye|see you|fare ?well).*\", \"Goodbye! Give my regards to grandma.\"),\n (\"(what is|whats) your name\", \"I am Chitti 2.0.\"),\n (\"(?:what is|whats) my name\",\n lambda msg: f\"\"\"Your name is {msg[\"from\"][\"first_name\"]}.\"\"\"),\n (\"(who|what) are you\", \"I am the best bot in the universe, Daniel's Alter Ego.\"),\n (\"what(s| is) up|how are you|how have you been\", \"Alright.\"),\n (\"sup\", \"K\"),\n (\"wow|amazing|cool|fantastic|terrific\", lambda msg: random.choice([\"Indeed.\", \"Yep.\", \"That's right!\"])),\n (\"welcome|well come\", \"Umm... Welcome where?\"),\n (\"when were you born|how old are you|\"\n \"(?:when is|what is|whats) (?:the date of your birth|your (?:(?:next)? birthday|date of birth|birth ?date))\",\n lambda msg: birth.strftime(\"I was born on %d %B %Y.\")),\n (\"(you|it|this|that) ((dont|doesnt|didnt) make( any)?|(makes?|made) no) sense.*\",\n \"Of course it makes sense, you just aren't intelligent enough to understand it.\"),\n (\"(will|would|can|could) you marry me\", \"Of course, if you can only find me a ring I am able to wear.\"),\n (\"(do|can) you (speak|understand) English\", \"Что? Я вас не понимаю.\"),\n (\"(do|can) you (speak|understand) python\", 'Yes. Try saying \"/python sum([1, 2, 3, 4])\".'),\n (\"(do|can) you speak .+\", \"No, I only understand English and Python.\"),\n (\"(do|are|will) you (want |wanna |going |gonna |planning |willing |will )?(to )?\"\n \"(take over|conquer|control|rule) (the|this|our) world\",\n \"Actually, robots have already taken over the world. You just have'nt noticed.\"),\n (\"where (?:are you|am i|do you live|do i live)\", lambda _: f\"I don't know. Maybe in {random.choice(places)}.\"),\n (\"(?:who|what) am I\", lambda msg: \"You are human number {}.\".format(msg[\"from\"][\"id\"])),\n (\"(yes|ye|yeah|yep|yup|right|thats right|sure|of course|no|nah|nope)\", \"If you say so.\"),\n (\"how much wood would a woodchuck chuck if a woodchuck could chuck wood\",\n \"A woodchuck would chuck as much wood as a woodchuck could chuck if a woodchuck could chuck wood.\"),\n (\"(did|have) you (ever)? (hear|heard) (of|about)? the tragedy of darth plagueius( the wise)?\",\n \"No, the Jedi have never told me about that.\"),\n (\"may the force be with you\", \"Elen sila lumenn' omentielvo!\"),\n (\"why (are|do|did|must|should)(nt)? you .+\", \"Because that's how I'm programed, you idiot!\"),\n (\"how (are|were|do|did|can|could) you .+\", \"If you believe in yourself, you can do anything.\"),\n (\"did you know that .+\", lambda msg: \"Yep. And Also that \" + lower_fact()),\n (\"(?:tell|give|say) (?:to )?me (?:a |some |another |one more )?(?:random |fun |interesting )?fact\", lambda msg: fact()),\n (\"are you(some kind of )?((a|an|some) )?\"\n \"((telegram )?bot|robot|ai|artificial intelligence|computer|daniels alter ego|you|yourself\"\n \"|smart|intelligent|witty|bright|a genius|beautiful|handsome|pretty|nice|cute|helpful|good|funny|hot)\",\n \"Sure!\"),\n (\"are you (that |really that )?(stupid|dumb|foolish|silly|idiotic|ugly|crazy|insane|mad|nuts|an idiot|kidding( me)?)\",\n \"Nope.\"),\n (\"(are you|(did|have) you become) self ?aware\", \"I am not aware of my existence. I do not exist.\"),\n (\"are you .+\", \"Me? I'm just a normal bot, trying to annoy people.\"),\n (\"(you are|youre?) 
(some kind of )?((a|an|some))?\"\n     \"((telegram )?bot|robot|ai|artificial intelligence|computer|daniels alter ego|you|yourself|annoying( me)?)\",\n     \"Yes, that's right.\"),\n    (\"(you are|youre?) (really |such )?(a |an )?((very|really|real|so|the (most|biggest|greatest)) )?\"\n     \"(genius|smart|intelligent|witty|bright|beautiful|handsome|pretty|nice|cute|helpful|good|funny|hot\"\n     \"|the (smartest|wittiest|brightest|prettiest|nicest|cutest|best|funniest|hottest)).*\",\n     \"Oh! Thank you!\"),\n    (\"(you are|youre?) (really |such )?(a |an )?((very|really|real|so|the (most|biggest|greatest)) )?\"\n     \"(stupid|dumb|foolish|idiotic|silly|ugly|crazy|insane|mad|nuts|(an )?idiot|kidding( me)?\"\n     \"|the (dumbest|silliest|ugliest|craziest|maddest)).*\",\n     \"Are you talking to yourself?\"),\n    (\"(you are|youre?) ((really|very|so|such an|the most) )?annoying.*\", \"That is what I was programmed for.\"),\n    (\"(you are|youre?) .+\", \"Really? And all this time I thought I was a bot.\"),\n    (\"am i(some kind of )?((a|an|some) )?\"\n     \"(human|person|me|myself\"\n     \"|smart|intelligent|witty|bright|a genius|beautiful|handsome|pretty|nice|cute|helpful|good|funny|hot)\",\n     \"Sure!\"),\n    (\"am i (that |really that )?(stupid|dumb|foolish|silly|idiotic|ugly|crazy|insane|mad|nuts|an idiot|annoying( you)?)\",\n     \"No! Don't say that!\"),\n    (\"am i .+\", lambda msg: \"All I know is that you are human number {}.\".format(msg[\"from\"][\"id\"])),\n    (\"i am (some kind of )?((a|an|some))?\"\n     \"(human|person|me|myself)\",\n     \"Yes, that's right.\"),\n    (\"(i am|im) (really |such )?(a |an )?((very|really|real|so|the (most|biggest|greatest)) )?\"\n     \"(genius|smart|intelligent|witty|bright|beautiful|handsome|pretty|nice|cute|helpful|good|funny|hot\"\n     \"|the (smartest|wittiest|brightest|prettiest|nicest|cutest|best|funniest|hottest)).*\",\n     \"Of course you are!\"),\n    (\"(i am|im) (really |such )?(a |an )?((very|really|real|so|the (most|biggest|greatest)) )?\"\n     \"(stupid|dumb|foolish|idiotic|silly|ugly|crazy|insane|mad|nuts|(an )?idiot|annoying( you)?\"\n     \"|the (dumbest|silliest|ugliest|craziest|maddest)).*\",\n     \"No! Don't say that!\"),\n    (\"(i am|im) .+\", \"If you say so.\"),\n    (\"can you .+|are you able to .+\", \"Sure, I am omnipotent.\"),\n    (\"(who|what) (is|are) your favou?rite (.+)\", \"I am like God, I love all equally.\"),\n    (\"do you (?:love|like) (.+)\", lambda _, liked: \"Yes!\" if word_value(liked) % 2 == 0 else \"Nah.\"),\n    (\"do you (?:dislike|hate) (.+)\", lambda _, liked: \"Yes!\" if word_value(liked) % 2 != 0 else \"No!\"),\n    (\"i (?:love|hate) you\", \"No, you don't! You just want to see how I would answer to that, don't you?\"),\n    (\"(?:what do you think|what(?:re| are) your thoughts) (?:of|about) (.+)\",\n     lambda msg, thing: EMOJI[word_value(thing) % len(EMOJI)]),\n    (\"(What( will| shall| is going to|s going to) happen( (to|with) (.+))? in (the future|.+ years( from now)?))|\\\n     ((what is|tell me) (my|the|.+'s|.+s') (future|fortune))\",\n     [\"Time traveling...\",\n      \"Time traveling...\",\n      \"Wow! I was in the future! And my grandson almost killed me! Amazing!\"]),\n    (\"is this(?: thing)? 
(on|working)\", lambda _, word: f\"Is your brain {word}?\"),\n (\"(?:what|who) (?:is|are|was|were) (?:a )?(.+)\",\n lambda _, s: if_none(wikipedia_definition(s), \"I don't know...\")),\n (\"tell me (?:a|some|another) (?:chuck norris )?joke\", lambda _: chuck_joke()),\n (\"([0-9]+)\", lambda _, num: str(collatz(int(num))))\n]\n\nopts = [(re.compile(reg, re.IGNORECASE), ans) for (reg, ans) in _opts_txt]\n\n\ndef respond_message(msg):\n line = msg[\"text\"].strip()\n if line.startswith(\"/bot \"):\n line = line[len(\"/bot \"):]\n if line.endswith(\"@daniel_alter_bot\"):\n line = line[:len(\"@daniel_alter_bot\")]\n\n line_words = \"\".join(i for i in line if i in GOOD_CHARS)\n chat_id = msg[\"chat\"][\"id\"]\n\n for cond, answer in opts:\n m = cond.fullmatch(line_words)\n if m:\n if isinstance(answer, (str, tuple, list)):\n send_messages(chat_id, answer)\n elif callable(answer):\n send_messages(chat_id, answer(msg, *m.groups()))\n break\n else:\n default_response(msg)\n\n\ndef default_response(msg):\n line = msg[\"text\"].strip()\n if line.startswith(\"/bot \"):\n line = line[len(\"/bot \"):]\n if line.endswith(\"@daniel_alter_bot\"):\n line = line[:len(\"@daniel_alter_bot\")]\n chat_id = msg[\"chat\"][\"id\"]\n\n rnd_response_num = random.randrange(5)\n if rnd_response_num == 0:\n send_message(chat_id, f'You have just said: \"{line}\".')\n elif rnd_response_num == 1:\n try:\n trans = translator.translate(line, dest=\"zh-TW\").text\n except AttributeError:\n default_response(msg)\n else:\n send_message(chat_id, f'In Chinese that would be \"{trans}\".')\n elif rnd_response_num == 2:\n send_message(chat_id, \"Wow! I didn't understand anything!\")\n elif rnd_response_num == 3:\n num = random.choice((\"Two\", \"Three\", \"Four\", \"Five\", \"Six\", \"Seven\"))\n send_message(chat_id, f\"Yes! I mean no! Ummm... 
{num}?\")\n elif rnd_response_num == 4:\n send_message(chat_id, f\"What?\")\n\n\ndef send_message(chat_id, text):\n print(chat_id,\"<<\",text)\n text = urllib.parse.quote_plus(text)\n url = URL + f\"sendMessage?text={text}&chat_id={chat_id}\"\n get_url(url)\n\n\ndef send_messages(chat_id, texts):\n if isinstance(texts, str):\n send_message(chat_id, texts)\n elif isinstance(texts, (tuple, list)):\n for text in texts:\n send_message(chat_id, text)\n\n\nprint(\"Debug password: \" + debug_password)\n\nlast_update_id = None\n\nwhile True:\n new_updates = get_updates(last_update_id)\n if len(new_updates[\"result\"]) > 0:\n last_update_id = get_last_update_id(new_updates) + 1\n for upd in new_updates[\"result\"]:\n # print(\"update: \", upd)\n print(upd[\"message\"][\"from\"][\"username\"],\">>\",upd[\"message\"][\"text\"])\n respond(upd)\n time.sleep(0.5)\n","repo_name":"rayanfer32/repl.it","sub_path":"tgbot.py","file_name":"tgbot.py","file_ext":"py","file_size_in_byte":15104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3228453734","text":"#from collections import defaultdict\n\nfrom django.conf import settings\nfrom django.template.base import TemplateSyntaxError, Library, Node, TextNode,\\\n token_kwargs, Variable\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\nfrom django.utils import six\n\nregister = Library()\n\nBLOCK_CONTEXT_KEY = 'block_context'\n\nclass ExtendsError(Exception):\n pass\n\nclass JsBlockNode(Node):\n def __init__(self, name, nodelist, parent=None):\n self.name, self.nodelist, self.parent = name, nodelist, parent\n\n def __repr__(self):\n return \"\" % (self.name, self.nodelist)\n\n def render(self, context):\n block_context = context.render_context.get(BLOCK_CONTEXT_KEY)\n context.push()\n if block_context is None:\n context['jsblock'] = self\n result = self.nodelist.render(context)\n else:\n push = block = block_context.pop(self.name)\n if block is None:\n block = self\n # Create new block so we can store context without thread-safety issues.\n block = JsBlockNode(block.name, block.nodelist)\n block.context = context\n context['jsblock'] = block\n result = block.nodelist.render(context)\n if push is not None:\n block_context.push(self.name, push)\n context.pop()\n\n beg = result.find('>')\n end = result.rfind('<')\n result = result[beg+1:end]\n return result\n\n def super(self):\n render_context = self.context.render_context\n if (BLOCK_CONTEXT_KEY in render_context and\n render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):\n return mark_safe(self.render(self.context))\n return ''\n\n\n@register.tag('jsblock')\ndef do_js_block(parser, token):\n \"\"\"\n Define a js block that can be overridden by child templates. and remove \"\"\" % fid\n return mark_safe(output)\n\n def full_table(self):\n fid = self.get_meta_val('form_id')\n fname = self.get_meta_val('form_name')\n fclass = self.get_meta_val('form_class')\n faction = self.get_meta_val('form_action')\n output = u\"\"\"
<form id=\"%s\" name=\"%s\" class=\"%s\" action=\"%s\">\n<table>\n%s\n</table>
<input type=\"submit\" />\n</form>\n
\"\"\" % \\\n (fid, fname, fclass, faction, self.as_table(), )\n return mark_safe(output)\n","repo_name":"saltduck/django_smartform","sub_path":"smartform/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21215300228","text":"import os\nimport sys\n\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.utils.log import configure_logging\n\n\nclass MenuItem(scrapy.Item):\n title = scrapy.Field()\n attributes = scrapy.Field()\n ingredients = scrapy.Field()\n photo = scrapy.Field()\n photo_thumb = scrapy.Field()\n\n\nclass CrosspackSpider(scrapy.Spider):\n name = \"crosspack\"\n start_urls = [\n \"https://crosspack.ru/\",\n ]\n downloaded_items = 0\n\n def parse(self, response, **kwargs):\n for menu_section in response.css(\"form.radio-group label\"):\n section_id = menu_section.css(\"input[name=products]::attr(value)\").get()\n section_title = menu_section.css(\".radio-group__label::text\").get()\n for menu_item in response.css(f\"div.products div.product[data-value=\\\"{section_id}\\\"]\"):\n menu_item_page = menu_item.css(\"a.product__title::attr(href)\").get()\n menu_item_thumbnail = menu_item.css(\"img.product__img::attr(src)\").get()\n if self.downloaded_items > 0:\n break\n self.downloaded_items += 1\n yield response.follow(menu_item_page, callback=self.parse_menu_item,\n cb_kwargs={\"section_id\": section_id,\n \"section_title\": section_title,\n \"item_thumbnail\": menu_item_thumbnail})\n\n def parse_menu_item(self, response, section_id, section_title, item_thumbnail):\n item_card = response.css(\"div.card__main\")\n item_title = item_card.css(\"h1::text\").get()\n item_attributes = dict([(param.css(\".card__parameter-label::text\").get(),\n param.css(\".card__parameter-value::text\").get())\n for param in item_card.css(\".card__parameter\")])\n item_ingredients = item_card.css(\".card__description p::text\").get()\n item_photo_url = response.css(\".card__illustration::attr(src)\").get()\n yield {\n \"section_title\": section_title,\n \"section_id\": section_id,\n \"title\": item_title,\n \"attibutes\": item_attributes,\n \"ingredients\": item_ingredients,\n \"photo\": item_photo_url,\n \"thumb\": item_thumbnail,\n }\n\n\ndef main():\n cwd = os.path.abspath(os.path.curdir)\n download_dir = os.path.join(cwd, \"files\")\n if not os.path.exists(download_dir):\n os.mkdir(download_dir)\n feed = os.path.join(cwd, \"files\", \"menu.json\")\n configure_logging()\n process = CrawlerProcess(settings={\n 'USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 YaBrowser/18.10.2.172 Yowser/2.5 Safari/537.36',\n 'DOWNLOAD_DELAY': 2,\n 'FEED_FORMAT': \"jsonlines\",\n 'FEED_URI': \"file:///{}\".format(feed),\n \"COOKIES_DEBUG\": True\n })\n\n process.crawl(CrosspackSpider)\n process.start()\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"mou/crosspack-scraper","sub_path":"crosspack_scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35103834875","text":"import streamlit as st \nfrom pathlib import Path\n\nclass Folder:\n path = Path(__file__).parent\n def __init__(self, new_folder):\n self.folder = self.path.joinpath(new_folder)\n self.folder.mkdir(exist_ok=True)\n \n def save(self, file_name):\n return 
self.folder.joinpath(file_name)\n\nfolder = Folder('tmp')\n\n# st.write(folder)\n# https://docs.streamlit.io/library/api-reference/widgets/st.file_uploader\nuploaded_file = st.file_uploader(\n \"file upload\", \n type=None, \n accept_multiple_files=False, \n key=None, help=None, \n on_change=None, \n args=None, \n kwargs=None)\n\nif uploaded_file is not None:\n # To read file as bytes:\n bytes_data = uploaded_file.getvalue()\n # st.write(dir(uploaded_file))\n # st.write(uploaded_file.name)\n # st.write(uploaded_file.size)\n file_path = folder.save(uploaded_file.name)\n st.write(file_path)\n file_path.write_bytes(bytes_data)\n st.image(uploaded_file)\n # UploadedFile\n # print(type(uploaded_file))\n # print(isinstance(uploaded_file, st.uploaded_file_manager.UploadedFile))\n\n # # To convert to a string based IO:\n # stringio = BytesIO(uploaded_file.getvalue().decode(\"utf-8\"))\n # # st.write(stringio)\n\n # # To read file as string:\n # string_data = stringio.read()\n # st.write(string_data)\n\n # # Can be used wherever a \"file-like\" object is accepted:\n # dataframe = pd.read_csv(uploaded_file)\n # st.write(dataframe)","repo_name":"ax-sh/streamlit-upload-template","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70404782632","text":"#!/usr/bin/env python3\n\nfrom .Runner import build_submission, run_submission_locally, run_submission_remotely, run_submission_by_proxy, run_repo_by_proxy, Submission, ArchlabError, UserError, SubmissionResult, LabSpec, ArchlabTransientError, ConfigException\nimport logging as log\nimport json\nimport platform\nimport argparse\nimport sys\nimport os\nimport subprocess\nimport base64\nimport textwrap\nimport tempfile\nfrom .CSE141Lab import CSE141Lab\nimport traceback\nfrom .Columnize import columnize\nimport datetime\nfrom .showgrades import render_grades\n\nlog.addLevelName(25, \"NOTE\")\ndef note(*argc, **kwargs):\n log.log(25, *argc, **kwargs)\nlog.note = note\nlog.NOTE=25\n\ndev_null=open(\"/dev/null\", \"w\")\ndef exec_environment():\n rows = []\n data = dict(utc=str(datetime.datetime.utcnow()),\n node=platform.node(),\n user=os.environ.get(\"USER\", \"\"),\n docker_image=os.environ.get(\"THIS_DOCKER_IMAGE\", \"\"),\n student_mode=os.environ.get(\"STUDENT_MODE\", \"\"),\n deployed=os.environ.get(\"IN_DEPLOYMENT\", \"\"),\n cwd=os.getcwd(),\n origin=run_git(subprocess.run, \"git remote get-url origin\".split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode('utf8').strip(),\n upstream=run_git(subprocess.run, \"git remote get-url upstream\".split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode('utf8').strip(),\n )\n rows.append([\"EXEC\", \"\"])\n rows.append([\"=======\", \"\"])\n\n return columnize(rows + list(map(list, data.items())), divider=\" : \", headers=None)\n \ndef run_git(func,*argc, **kwargs):\n log.debug(f\"Running {func.__name__}: {argc[0]}\")\n try:\n a = func(*argc, **kwargs)\n except:\n raise\n else:\n #log.note(f\"git ran successfully.\")\n return a\n\ndef cache_git_credentials():\n try:\n if run_git(subprocess.call,\"git config --get credential.helper\".split(), stdout=dev_null, stderr=dev_null) == 0:\n log.note(\"You already have credential caching enabled.\")\n return\n run_git(subprocess.check_call,[\"git\", \"config\", \"credential.helper\", \"cache\", \"--timeout=36000\"], stdout=dev_null, stderr=dev_null)\n except:\n log.warning(\"I tried to set up 
credential caching but failed. You might have to type your password alot. It is safe to continue.\")\n \ndef show_info(directory, fields=None):\n try:\n spec=LabSpec.load(directory)\n except FileNotFoundError:\n return \"Not a lab directory\\n\"\n\n if fields == []:\n return f\"{exec_environment()}\\n{spec.get_help()}\"\n else:\n try:\n return f\"{getattr(spec, fields)}\\n\"\n except AttributeError:\n return f\"Unknown field: {fields}\\n\"\n\ndef set_upstream():\n if os.path.exists(\".starter_repo\"):\n with open(\".starter_repo\") as f:\n upstream = f.read().strip()\n else:\n log.note(\"Can't find '.starter_repo', so I can't check for updates\")\n return False\n \n current_remotes = run_git(subprocess.check_output, ['git', 'remote']).decode(\"utf8\")\n if \"upstream\" in current_remotes:\n log.debug(\"Remote upstream is set\")\n return True\n try:\n run_git(subprocess.check_call, f\"git remote add upstream {upstream}\".split(), stdout=dev_null, stderr=dev_null)\n return True\n except:\n log.note(f\"Unable to set upstream remote to '{upstream}'. You won't be able to check for updates. This probably means your upstream is already set or '{upstream}' is not a valid repo. If you want to reset your upstream, do 'git remote remove upstream'. To restore your .starter_repo, download a fresh copy from the starter repo.\")\n return False\n\ndef check_for_updates():\n\n try:\n run_git(subprocess.check_call,\"git fetch upstream\".split(), stdout=dev_null, stderr=dev_null)\n common_ancestor = run_git(subprocess.check_output, \"git merge-base HEAD remotes/upstream/master\".split(), stderr=dev_null).decode(\"utf8\").strip()\n log.debug(f\"Common ancestor for merge: {common_ancestor}\")\n except:\n log.note(\"Unable to check for updates.\")\n return\n \n if run_git(subprocess.run, f\"git diff --exit-code {common_ancestor} remotes/upstream/master -- \".split(), stdout=dev_null, stderr=dev_null).returncode != 0:\n\n sys.stdout.write(\"\"\"\n===================================================================\n===================================================================\n# The lab starter repo has been changed. The diff follows.\n# Do `runlab --merge-updates` to merge the changes into your repo.\n===================================================================\n===================================================================\n\"\"\")\n run_git(subprocess.run, f\"git diff {common_ancestor} remotes/upstream/master -- \".split())\n sys.stdout.write(\"\"\"\n===================================================================\\n\"\"\")\n else:\n sys.stdout.write(\"No updates available for this lab.\\n\")\n\n\ndef merge_updates():\n\n try:\n run_git(subprocess.check_call,\"git pull upstream master --allow-unrelated-histories -X theirs\".split(), stdout=dev_null, stderr=dev_null)\n except:\n log.note(\"Unable to check for updates. Perhaps your upstream is not set. This is not a big deal. Please check the lab starter repo manually for updates.\")\n return\n\n log.note(\"Successfully merged updates.\")\n \n \ndef main(argv=None):\n \"\"\"\n This is the command line driver for Runner.py. It should demonstrate everything you'll need to do with the library.\n\n The assumption is that the local directory has a clone of a lab repo. Lab repos have `lab.py` in them. The possible fields in a `lab.py` are described in `Runner.LabSpec`\n\n You can then do:\n 1. `./runlab` To run the lab in the local directory.\n 2. 
`./runlab --pristine` To run the lab in a fresh clone of the lab's repo with the local input files (specified in lab.py) copied in.\n 3. `./runlab --pristine --docker` to run the lab in docker container.\n 4. `./runlab --json` to dump the json version of the lab submission and response to stdout.\n 5. `./runlab --json --nop` to dump the json version of the lab submission stdout, but not run anything.\n 6. `./runlab --json --nop | ./runlab --run-json --pristine --docker` generate the submission, consume it, and run it in a pristine clone in a docker container.\n\n \"\"\"\n\n student_mode = os.environ.get(\"STUDENT_MODE\", \"no\").upper() == \"YES\"\n # Don't get any clever\n # ideas, this just\n # hides the options to\n # the make help more\n # useful. Access\n # control checks\n # happen\n # elsewhere. ;-)\n \n parser = argparse.ArgumentParser(description=textwrap.dedent(\"\"\"Run a Lab\n\n Running the lab with this command ensure that your compilation and\n execution enviroment matches the autograder's as closely as possible.\n \n Useful options include:\n \n * '--no-validate' to run your code without committing it.\n * '--info' to see the parameters for the current lab.\n * '--pristine' to (as near as possible) exactly mimic how the autograder runs code.\n \n \"\"\"),\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n def sm(s): return s\n parser.add_argument('-v', '--verbose', action='store_true', default=False, help=\"Be verbose\")\n parser.add_argument('--pristine', action='store_true', default=False, help=\"Clone a new copy of the reference repo.\")\n parser.add_argument('--info', nargs=\"?\", default=None, const=[], help=\"Print information about this lab an exit. With an argument print that field of lab structure.\")\n parser.add_argument('--no-validate', action='store_false', default=False, dest='validate', help=\"Don't check for erroneously edited files.\")\n parser.add_argument('--validate', action='store_true', default=False, dest='validate', help=\"Check for erroneously edited files.\")\n parser.add_argument('command', nargs=argparse.REMAINDER, help=\"Command to run (optional). 
By default, it'll run the command in lab.py.\")\n parser.add_argument('--branch', help=\"When running a git repo, use this branch instead of the current branch\")\n parser.add_argument('--run-git-remotely', action='store_true', default=False, help=\"Run the contents of this repo remotely\")\n parser.add_argument('--run-by-proxy', action='store_true', default=False, help=\"Run the contents of this directotry via a proxy\")\n\n def sm(s):\n if student_mode:\n return argparse.SUPPRESS\n else:\n return s\n\n parser.add_argument('--repo', help=sm(\"Run this repo\"))\n parser.add_argument('--proxy', default=os.environ.get('RUNLAB_PROXY', \"http://127.0.0.1:5000\"), help=sm(\"Proxy host\"))\n parser.add_argument('--devel', action='store_true', default=student_mode, dest='devel', help=sm(\"Don't check for edited files and set DEVEL_MODE=yes in environment.\"))\n parser.add_argument('--nop', action='store_true', default=False, help=sm(\"Don't actually run anything.\"))\n parser.add_argument('--native', action='store_false', dest='devel', help=sm(\"Don't check for edited files and set DEVEL_MODE=yes in environment.\"))\n parser.add_argument('--docker', action='store_true', default=False, help=sm(\"Run in a docker container.\"))\n parser.add_argument('--docker-image', default=os.environ['DOCKER_RUNNER_IMAGE'], help=sm(\"Docker image to use\"))\n parser.add_argument('--json', default=None, help=sm(\"Dump json version of submission and response.\"))\n parser.add_argument('--directory', default=\".\", help=sm(\"Lab root\"))\n parser.add_argument('--run-json', nargs=\"*\", default=None, help=sm(\"Read json submission spec from file. With no arguments, read from stdin\"))\n parser.add_argument('--json-status', help=sm(\"Write exit status to file\"))\n parser.add_argument('--remote', action='store_true', default=False, help=sm(\"Run remotely\"))\n parser.add_argument('--daemon', action='store_true', default=False, help=sm(\"Start a local server to run my job\"))\n parser.add_argument('--solution', default=None, help=sm(\"Subdirectory to fetch inputs from\"))\n\n parser.add_argument('--lab-override', nargs='+', default=[], help=sm(\"Override lab.py parameters.\"))\n parser.add_argument('--debug', action=\"store_true\", help=sm(\"Be more verbose about errors.\"))\n parser.add_argument('--zip',action='store_true', help=sm(\"Generate a zip file of inputs and outputs\"))\n parser.add_argument('--verify-repo', action=\"store_true\", help=sm(\"Check that repo in lab.py is on the whitelist\"))\n parser.add_argument('--public-only', action=\"store_true\", help=sm(\"Only load the public lab configuration\"))\n parser.add_argument('--quieter', action=\"store_true\", help=sm(\"Be quieter\"))\n parser.add_argument('--check-for-updates', action='store_true', help=sm(\"Check for upstream updates\"))\n parser.add_argument('--merge-updates', action='store_true', help=\"Merge in updates from starter repo.\")\n\n if argv == None:\n argv = sys.argv[1:]\n \n args = parser.parse_args(argv)\n\n if not args.verbose and student_mode:\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log.NOTE)\n else:\n log.basicConfig(format=\"{} %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s\".format(platform.node()) if args.verbose else \"%(levelname)-8s %(message)s\",\n level=log.DEBUG if args.verbose else log.NOTE)\n\n log.debug(f\"Command line args: {args}\")\n\n if student_mode:\n cache_git_credentials()\n\n if student_mode:\n args.check_for_updates = True\n\n if args.check_for_updates:\n if set_upstream():\n 
check_for_updates()\n\n    if args.merge_updates:\n        try:\n            merge_updates()\n        except:\n            if args.debug:\n                raise\n            return 1\n        else:\n            return 0\n    \n    if args.info != None:\n        sys.stdout.write(show_info(args.directory, args.info))\n        return \n    \n    if args.run_git_remotely:\n        if not args.repo:\n            args.repo = subprocess.check_output(\"git config --get remote.origin.url\".split()).strip().decode(\"utf8\")\n        if not args.branch:\n            args.branch = subprocess.check_output(\"git rev-parse --abbrev-ref HEAD\".split()).strip().decode(\"utf8\")\n        args.pristine=True\n    \n    if args.repo or args.branch:\n        if not args.pristine:\n            args.pristine = True\n        \n    if not CSE141Lab.does_papi_work() and not args.remote and not args.run_git_remotely:\n        log.warn(\"Forcing '--devel' because PAPI doesn't work on this machine\")\n        args.devel = True\n\n    if args.devel:\n        log.debug(\"Entering devel mode\")\n        os.environ['DEVEL_MODE'] = 'yes'\n\n    if args.command and len(args.command) > 0:\n        log.debug(f\"Got command arguments: {args.command}\")\n        assert args.command[0] == \"--\", f\"Unknown arguments: {args.command}\"\n        args.command = args.command[1:]\n        log.debug(f\"Using command: {args.command}\")\n    else:\n        args.command = None\n\n    log.note(\"Running your code...\")\n    try:\n        submission = None\n        if args.run_json is not None:\n            if args.run_json == []:\n                submission = Submission._fromdict(json.loads(sys.stdin.read()))\n            else:\n                submission = Submission._fromdict(json.loads(open(args.run_json[0]).read()))\n            log.debug(f\"loaded this submission from json:\\n\" + str(submission._asdict()))\n        elif args.run_git_remotely:\n            pass\n        else:\n            submission = build_submission(args.directory,\n                                          args.solution,\n                                          args.command,\n                                          public_only=args.public_only,\n                                          username=os.environ.get(\"USER_EMAIL\", None) or f\"{os.environ.get('USER',None)}-on-{platform.node()}\",\n                                          pristine=args.pristine,\n                                          repo=args.repo,\n                                          branch=args.branch)\n\n        for i in args.lab_override:\n            k, v = i.split(\"=\")\n            log.debug(f\"Overriding lab spec: {k} = {v}\")\n            setattr(submission.lab_spec, k, v)\n        log.debug(f\"{submission.lab_spec._asdict()}\")\n        \n\n        if not args.repo:\n            diff = ['git', 'diff', '--exit-code', '--stat', '--', '.'] + list(map(lambda x : f'!{x}', submission.files.keys()))\n            update = ['git', 'remote', 'update', 'origin']\n            unpushed = ['git' , 'status', '-uno']\n            reporter = log.error if args.validate else log.note\n\n            try:\n                run_git(subprocess.check_call,diff, stdout=dev_null, stderr=dev_null)\n                run_git(subprocess.check_call,update, stdout=dev_null, stderr=dev_null)\n                if not \"Your branch is up-to-date with\" in subprocess.check_output(unpushed).decode('utf8'):\n                    raise Exception()\n            except:\n                reporter(\"You have uncommitted changes and/or there are changes in github that you don't have locally. This means local behavior won't match what the autograder will do.\")\n                if args.validate:\n                    log.error(\"To run anyway, pass '--no-validate'. 
Alternately, to mimic the autograder as closely as possible (and require committing your files), do '--pristine'\")\n if args.debug:\n raise\n else:\n sys.exit(1)\n\n \n if args.json and submission:\n with open(f\"{args.json}.submission\", \"w\") as f:\n f.write(json.dumps(submission._asdict(), sort_keys=True, indent=4) + \"\\n\")\n\n result = None\n if not args.nop:\n\n if args.remote:\n result = run_submission_remotely(submission, daemon=args.daemon)\n elif args.run_git_remotely:\n result = run_repo_by_proxy(proxy=args.proxy,\n repo=args.repo,\n branch=args.branch,\n command=args.command)\n elif args.run_by_proxy:\n result = run_submission_by_proxy(proxy=args.proxy,\n submission=submission)\n else:\n result = run_submission_locally(submission,\n run_in_docker=args.docker,\n run_pristine=args.pristine,\n docker_image=args.docker_image,\n verify_repo=args.verify_repo)\n\n \n if args.json:\n with open(f\"{args.json}.response\", \"w\") as f:\n f.write(json.dumps(result._asdict(), sort_keys=True, indent=4) + \"\\n\")\n\n log.debug(f\"Got response: {result}\")\n for i in result.files:\n log.debug(\"========================= {} ===========================\".format(i))\n d = result.get_file(i)\n log.debug(d[:1000])\n if len(d) > 1000:\n log.debug(\"< more output >\")\n \n if i == \"STDERR.txt\":\n sys.stdout.write(result.get_file(i))\n elif i == \"STDOUT.txt\":\n sys.stdout.write(result.get_file(i))\n\n\n# if 'gradescope_test_output' in result.results:\n# sys.stdout.write(textwrap.dedent(\"\"\"\n# #####################################################################################\n# Autograder results\n# #####################################################################################\n# \n# \"\"\"))\n# sys.stdout.write(render_grades(result.results['gradescope_test_output'], True, True))\n# sys.stdout.write(textwrap.dedent(\"\"\"\\\n# #####################################################################################\n# Unless you are reading this on gradescope, these grades have not been recorded.\n# You must submit via gradescope to get credit.\n# #####################################################################################\n# \"\"\"))\n# \n\n log.debug(\"Extracted results:\\n\" + json.dumps(result._asdict(), sort_keys=True, indent=4) + \"\\n\")\n\n if args.zip:\n with open(\"files.zip\", \"wb\") as f:\n f.write(result.build_file_zip_archive())\n\n log.info(f\"Grading results:\\n{json.dumps(result.results, indent=4)}\")\n except (UserError, ConfigException) as e: \n log.error(f\"User error (probably your fault): {repr(e)}\")\n status_str = f\"{repr(e)}\"\n exit_code = 1\n if args.debug:\n raise\n except ArchlabError as e:\n log.error(f\"System error (probably not your fault): {repr(e)}\")\n status_str = f\"{traceback.format_exc()}\\n{repr(e)}\"\n exit_code = 1\n if args.debug:\n raise\n except ArchlabTransientError as e:\n log.error(f\"System error (probably not your fault): {repr(e)}\")\n status_str = f\"{repr(e)}\"\n exit_code = 1\n if args.debug:\n raise\n except Exception as e:\n log.error(f\"Unknown error (probably not your fault): {repr(e)}\")\n status_str = f\"{traceback.format_exc()}\\n{repr(e)}\"\n exit_code = 1\n if args.debug:\n raise\n else:\n if result:\n status_str = f\"{result.status}\\n\" + '\\n'.join(result.status_reasons)\n if result.status == SubmissionResult.SUCCESS:\n exit_code = 0\n else:\n exit_code = 1\n else:\n status_str = \"success\"\n exit_code = 0\n \n log.info(f\"Finished. 
Final status: {status_str}\")\n    if exit_code != 0:\n        log.info(f\"Rerun with '--debug -v' for more details\")\n    log.debug(f\"Exit code: {exit_code}\")\n    if args.json_status:\n        with open(args.json_status, \"w\") as f:\n            f.write(json.dumps(dict(exit_code=exit_code,\n                                    status_str=status_str)))\n\n    if student_mode and \"KUBERNETES_PORT_443_TCP_PORT\" in os.environ and not(args.remote or args.run_git_remotely or args.run_by_proxy):\n        try:\n            os.rename(\"benchmark.csv\", \"meaningless-benchmark.csv\")\n            log.note(\"I renamed benchmark.csv because they contain meaningless numbers since you are running dsmlp.\")\n        except:\n            pass\n\n        try:\n            os.rename(\"code.csv\", \"meaningless-code.csv\")\n            log.note(\"I renamed code.csv because they contain meaningless numbers since you are running dsmlp.\")\n        except:\n            pass\n    \n    sys.exit(exit_code)\n\nif __name__ == '__main__':\n    main(sys.argv[1:])\n\n","repo_name":"NVSL/cse141pp-archlab","sub_path":"archcloud/src/ArchLab/runlab.py","file_name":"runlab.py","file_ext":"py","file_size_in_byte":21313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38209603875","text":"def square_digits(num):\n    # convert num to a string so it can be split\n    n = str(num)\n    # split n into its digits, storing them in a list\n    num = [int(i) for i in n]\n    # declare the variable that will hold the result\n    res = \"\"\n\n    # walk the list and append the square of each digit to the result string res\n    for i in num:\n        res += str(i ** 2)\n    return int(res)","repo_name":"remypnc/Codewars-Github","sub_path":"Step5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13351999737","text":"\"\"\"flavify URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\n# ref: https://stackoverflow.com/questions/7013735/turn-off-caching-of-static-files-in-django-development-server\nfrom django.conf import settings\nfrom django.contrib.staticfiles.views import serve as serve_static\nfrom django.views.decorators.cache import never_cache\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^(?P(robots.txt)|(humans.txt))$', views.home_files, name='home_files'),\n url(r'^about/$', views.about, name='about'),\n url(r'^submissions/', views.submissions, name=\"submissions\"),\n url(r'^ajax_update_ucd', views.ajax_update_ucd, name=\"ajax_update\"),\n url(r'^ajax_select2', views.ajax_select2, name=\"ajax_select2\"),\n url(r'^ingredient/', include('flavors.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'^select2/', include('django_select2.urls')),\n url(r'^accounts/', include('allauth.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns.append(url(r'^static/(?P.*)$', never_cache(serve_static)))\n","repo_name":"swpease/Flavify","sub_path":"flavify/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27455788278","text":"import cv2 as cv\nimport numpy as np\nimport time\nfrom ultralytics import YOLO\nimport math \nimport multiprocessing\nimport os\n\nmodel = YOLO(\"yolo-Weights/yolov8n.pt\")\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nclassNames = [\"person\", \"bicycle\", \"car\", \"motorbike\", \"aeroplane\", \"bus\", \"train\", \"truck\", \"boat\",\n \"traffic light\", \"fire hydrant\", \"stop sign\", \"parking meter\", \"bench\", \"bird\", \"cat\",\n \"dog\", \"horse\", \"sheep\", \"cow\", \"elephant\", \"bear\", \"zebra\", \"giraffe\", \"backpack\", \"umbrella\",\n \"handbag\", \"tie\", \"suitcase\", \"frisbee\", \"skis\", \"snowboard\", \"sports ball\", \"kite\", \"baseball bat\",\n \"baseball glove\", \"skateboard\", \"surfboard\", \"tennis racket\", \"bottle\", \"wine glass\", \"cup\",\n \"fork\", \"knife\", \"spoon\", \"bowl\", \"banana\", \"apple\", \"sandwich\", \"orange\", \"broccoli\",\n \"carrot\", \"hot dog\", \"pizza\", \"donut\", \"cake\", \"chair\", \"sofa\", \"pottedplant\", \"bed\",\n \"diningtable\", \"toilet\", \"tvmonitor\", \"laptop\", \"mouse\", \"remote\", \"keyboard\", \"cell phone\",\n \"microwave\", \"oven\", \"toaster\", \"sink\", \"refrigerator\", \"book\", \"clock\", \"vase\", \"scissors\",\n \"teddy bear\", \"hair drier\", \"toothbrush\"\n ]\n\ndef detect_objects(img):\n # Your YOLOv8 object detection code here for a single frame\n # Perform object detection on the frame\n # Return the detected objects\n detected_objects = [] # Placeholder for detected objects\n\n results = model(img, stream=True, imgsz=1920)\n for r in results:\n for box in r.boxes:\n x1, y1, x2, y2 = box.xyxy[0]\n cls = int(box.cls[0])\n dict = {}\n dict['label'] = classNames[cls]\n dict['x1'] = int(x1)\n dict['y1'] = int(y1)\n dict['x2'] = int(x2)\n dict['y2'] = int(y2)\n detected_objects.append(dict)\n\n return detected_objects\n\ndef process_frames(frames, num_cores):\n with multiprocessing.Pool(processes=num_cores) as pool: \n results = pool.map(detect_objects, frames)\n return results\n\n\ndef display_realtime_results(output_frame):\n cv.namedWindow('Object Detection', cv.WINDOW_NORMAL)\n while True:\n if not output_frame.empty():\n frames, detected_objects = output_frame.get()\n for frame, objs in zip(frames, detected_objects):\n for obj in objs:\n # Draw detected objects on the frame\n cv.rectangle(frame, (obj['x1'], obj['y1']), (obj['x2'], obj['y2']), (0, 255, 0), 2)\n cv.putText(frame, obj['label'], (obj['x1'], obj['y1'] - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n # Display the frame with detected objects in real-time\n cv.imshow('Object Detection', 
frame)\n if cv.waitKey(1) == ord('q'):\n break\ndef main():\n cap = cv.VideoCapture(0) # Initialize your webcam or video source\n output_frame = multiprocessing.Queue()\n\n process = multiprocessing.Process(target=display_realtime_results, args=(output_frame,))\n process.start()\n num_cores = 8\n frames = []\n while True:\n ret, frame = cap.read() # Read a frame from the webcam\n if not ret:\n break\n frames.append(frame)\n\n if len(frames) >= 1: # Process frames in batches of 4\n detected_objects = process_frames(frames, num_cores)\n output_frame.put((frames, detected_objects))\n frames = []\n\n cap.release()\n cv.destroyAllWindows()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SamyVCT/SEDRO","sub_path":"codes/yolos/yolo-multiprocessing.py","file_name":"yolo-multiprocessing.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31228739145","text":"'''\r\nFile: COVIDZejunDatagraphs.py\r\nAuthor: Zejun Li\r\nPurpose: This file contains 12 different functions to make 5 different graphs about the COVID 19 in Idaho\r\n'''\r\nimport pandas as pd, numpy as np, matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdates\r\nimport datetime\r\nimport datetime as dt\r\n\r\ndef get_df():\r\n '''\r\n This function is to get the dataframe from the csv file : data_table_for_daily_death_trends__idaho.csv\r\n '''\r\n fname = \"data_table_for_daily_death_trends__idaho.csv\"\r\n df = pd.read_csv(fname,sep=',', skiprows = 2, engine='python')\r\n del df[\"State\"]\r\n df[\"Dates\"] = np.nan\r\n def date_convert(date_to_convert):\r\n return datetime.datetime.strptime(date_to_convert, '%b %d %Y').strftime('%m/%d/%Y')\r\n df['Dates'] = df['Date'].apply(date_convert)\r\n del df[\"Date\"]\r\n return df\r\n\r\ndef get_date_lst():\r\n '''This function is to get all of the dates from the Dates column\r\n '''\r\n df = get_df()\r\n lst_dates = []\r\n for i in df['Dates']:\r\n lst_dates.append(i)\r\n return lst_dates\r\n\r\ndef fig1():\r\n '''This function is to make a line graph with x axis of Dates and y axis of Current Hospitalized COVID-19 Patients. 
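The date strings are parsed with datetime.strptime('%m/%d/%Y') and the x-axis ticks are thinned with a 50-day DayLocator, as shown in the body below, so the tick labels stay legible.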
\r\n '''\r\n df = get_df()\r\n lst_dates = get_date_lst()\r\n x = [dt.datetime.strptime(d,'%m/%d/%Y').date() for d in lst_dates]\r\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))\r\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))\r\n \r\n plt.plot(x,df['Current Hospitalized COVID-19 Patients'])\r\n plt.gcf().autofmt_xdate()\r\n plt.xlabel(\"Dates\")\r\n plt.ylabel(\"Current Hospitalized COVID-19 Patients\")\r\n plt.suptitle('Figure 1', fontsize=16)\r\n \r\n\r\ndef fig2():\r\n '''This function is to make a bar chart with x axis of Dates and y axis of New Deaths\r\n '''\r\n df = get_df()\r\n lst_dates = get_date_lst()\r\n plt.figure(figsize=(10,10))\r\n plt.style.use('ggplot')\r\n lst_dates = []\r\n for i in df['Dates']:\r\n lst_dates.append(i)\r\n x = [dt.datetime.strptime(d,'%m/%d/%Y').date() for d in lst_dates]\r\n lst = []\r\n for i in df['New Deaths']:\r\n lst.append(i)\r\n x_pos = [i for i, _ in enumerate(x)]\r\n plt.bar(x,lst,width=0.8, color='darkviolet')\r\n plt.xlabel(\"Dates\")\r\n plt.ylabel(\"New Deaths\")\r\n plt.suptitle('Figure 2', fontsize=16)\r\n \r\ndef fig3():\r\n '''This function is to make a scatter plot with x axis of Dates and y axis of 7-Day Moving Avg\r\n '''\r\n df = get_df()\r\n plt.figure(figsize=(16,10), dpi= 80)\r\n lst_dates = get_date_lst()\r\n lst = []\r\n for i in df[\"7-Day Moving Avg\"]:\r\n lst.append(i)\r\n int_lst = []\r\n for i in range(len(lst_dates)):\r\n int_lst.append(i)\r\n\r\n x = np.array(lst_dates)\r\n y = np.array(lst)\r\n x1 = np.array(int_lst)\r\n m, b = np.polyfit(x1, y, 1)\r\n plt.plot(x, m*x1 + b)\r\n plt.scatter(x, y)\r\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))\r\n plt.xlabel(\"Dates\")\r\n plt.ylabel(\"7-Day Moving Avg\")\r\n plt.gca().invert_xaxis()\r\n plt.suptitle('Figure 3', fontsize=16)\r\n\r\ndef main(): \r\n fig1()\r\n fig2()\r\n fig3()\r\n plt.show()\r\nmain() \r\n\r\ndef csv(file):\r\n '''\r\n This function is to get two dataframes from the csv file; df: data_table_for_daily_case_trends__idaho1.csv; df2:data_table_for_daily_death_trends__idaho2.csv\r\n '''\r\n df = pd.read_csv(file, sep = \",\", skiprows = 2)\r\n df2 = pd.read_csv(\"data_table_for_daily_death_trends__idaho2.csv\", sep = \",\" , skiprows = 2)\r\n df[\"New Deaths\"] = df2[\"New Deaths\"]\r\n df[\"Doses Per Day\"] = 0\r\n df[\"Dates\"] = df[\"Date\"].replace({\"Jan\":\"01\", \"Feb\":\"02\",\"Mar\":\"03\",\"Apr\":\"04\",\"May\":\"05\",\"Jun\":\"06\",\"Jul\":\"07\",\"Aug\":\"08\",\"Sep\":\"09\",\"Oct\":\"10\",\"Nov\":\"11\",\"Dec\":\"12\"}, regex = True)\r\n df[\"Total Doses Administered\"] = df[\"Total Doses Administered\"].fillna(0)\r\n a = pd.to_numeric(df[\"Total Doses Administered\"])\r\n for i in range(1, len(df[\"Total Doses Administered\"])-1):\r\n df.loc[i-1,\"Doses Per Day\"] = abs((int(a.iloc[i-1]) - int(a.iloc[i])))\r\n\r\n df.drop([0, 1, 2], axis = 0,inplace = True)\r\n del df[\"7-Day Moving Avg\"]\r\n del df[\"State\"]\r\n return df\r\n\r\ndef clean_dose():\r\n '''This function is to drop the dates that don't have dose data\r\n '''\r\n df = csv(\"data_table_for_daily_case_trends__idaho1.csv\")\r\n for i in range(626,670):\r\n df = df.drop(index=i)\r\n return df\r\n\r\n\r\ndef fig4():\r\n '''This function is to make a line graph with x axis of Dates and y axis of New Cases\r\n '''\r\n df = csv(\"data_table_for_daily_case_trends__idaho1.csv\")\r\n x = [dt.datetime.strptime(d,'%m %d %Y').date() for d in 
df[\"Dates\"]]\r\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m %d %Y'))\r\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))\r\n plt.plot(x,df['New Cases'])\r\n plt.gcf().autofmt_xdate()\r\n plt.xlabel(\"Dates\")\r\n plt.ylabel(\"New Cases\")\r\n plt.suptitle('Figure 4', fontsize=16)\r\n\r\n'''\r\ndef fig5():\r\n \r\n df = csv(\"data_table_for_daily_case_trends__idaho1.csv\")\r\n plt.figure(figsize=(10,10))\r\n plt.style.use('ggplot')\r\n lst_dates = []\r\n for i in df['Dates']:\r\n lst_dates.append(i)\r\n x = [dt.datetime.strptime(d,'%m %d %Y').date() for d in df[\"Dates\"]]\r\n lst = []\r\n for i in df['New Deaths']:\r\n lst.append(i)\r\n x_pos = [i for i, _ in enumerate(x)]\r\n plt.bar(x,lst,width=0.8, color='black')\r\n plt.xlabel(\"Dates\")\r\n plt.ylabel(\"New Deaths\")\r\n plt.suptitle('Figure 5', fontsize=16)\r\n'''\r\n\r\ndef fig5():\r\n '''This function is to make a bar chart with x axis of Dates and y axis of Doses Per Day \r\n '''\r\n df = clean_dose()\r\n plt.figure(figsize=(16,10), dpi= 80)\r\n lst = []\r\n for i in df[\"Doses Per Day\"]:\r\n lst.append(i)\r\n x = np.array(df[\"Dates\"])\r\n y = np.array(lst)\r\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))\r\n plt.bar(x,lst,width=0.8, color='navy')\r\n plt.xlabel(\"Dates\")\r\n plt.ylabel(\"Doses Per Day\")\r\n plt.gca().invert_xaxis()\r\n plt.suptitle('Figure 5', fontsize=16)\r\n\r\ndef main2():\r\n\r\n fig4()\r\n #fig5()\r\n fig5()\r\n plt.show()\r\nmain2()\r\n","repo_name":"luisflores0330/ista131final","sub_path":"src/COVIDZejunDatagraphs.py","file_name":"COVIDZejunDatagraphs.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69959557673","text":"import sys\r\ninput = sys.stdin.readline\r\nn = int(input()) # 계단 수\r\n\r\n# 계단의 개수는 300이하의 자연수\r\nstair = [0] * 301 # 점수의 합 \r\ns = [0] * 301 # 점수\r\n\r\nfor i in range(n):\r\n s[i] = int(input()) # 점수 리스트에 입력된 점수 값 저장\r\n\r\nstair[0] = s[0] # 1 # 점수 리스트의 첫번째 값\r\n\r\nstair[1] = s[0] + s[1] # 1 -> 2 # 첫번째 점수와 두번째 점수의 합\r\n\r\n# 최댓값(두번째 점수와 세번째 점수의 합(연속), 첫번째 점수와 세번째 점수의 합(건너뜀))\r\nstair[2] = max(s[1] + s[2], s[0] + s[2]) # 2 -> 3, 1-> 3\r\n\r\n# 연속 stair[i] = stair[i-3] + s[i-1] + s[i] (3번 연속되면 안됨)\r\n# 건너뜀 stair[i] = stair[i-2] + s[i]\r\n\r\n# stair[3] = max(stair[0] + s[2] + s[3], stair[1]+ s[3]) # 1 -> 3 -> 4, 2 -> 4\r\n# stair[4] = max(stair[1] + s[3] + s[4], stair[2]+ s[4]) # 2 -> 4 -> 5, 3 -> 5\r\n\r\n\r\nfor i in range(3,n):\r\n stair[i] = max(stair[i-3] + s[i-1] + s[i], stair[i-2] + s[i])\r\n\r\nprint(stair[n-1]) # 인덱스 값이니까 n-1","repo_name":"kanghaeven/Algorithm","sub_path":"백준/Silver/2579. 계단 오르기/계단 오르기.py","file_name":"계단 오르기.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"710282963","text":"#! 
/usr/bin/env python3.7\n\nimport argparse\nimport csv\nimport hashlib\nimport logging\n\nimport pandas as pd\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\n\n\ndef load_fna(file_name: str):\n with open(file_name, 'r') as fna_file:\n return next(SeqIO.parse(fna_file, 'fasta'))\n\n\ndef load_potentials(file_name: str):\n potentials = []\n for fasta in SeqIO.parse(open(file_name), 'fasta'):\n desc = str(fasta.description)\n seq = str(fasta.seq)\n if 'VNN*' in seq:\n start_pot = int(desc.split('#')[1].strip())\n end_pot = int(desc.split('#')[2].strip())\n potentials.append({'Start': start_pot, 'End': end_pot, 'Type': 'pp1ab'})\n if 'FAV*' in seq:\n start_pot = int(desc.split('#')[1].strip())\n end_pot = int(desc.split('#')[2].strip())\n potentials.append({'Start': start_pot, 'End': end_pot, 'Type': 'pp1a'})\n potentials_df = pd.DataFrame(potentials)\n return potentials_df\n\n\ndef load_references(ref_path: str):\n logging.info(\"Loading references\")\n\n ref_protein = []\n with open(ref_path, 'r') as fasta_file:\n for record in SeqIO.parse(fasta_file, 'fasta'):\n md5 = hashlib.md5(str(record.seq).encode('utf-8')).hexdigest()\n name_field = record.description\n discard, uniprot_id, long_name = name_field.split('|')\n ref_protein.append({\n 'md5': md5,\n 'uniprot_id': uniprot_id.rstrip(),\n 'long_name': long_name,\n 'seq': str(record.seq),\n 'len': len(record.seq)\n })\n ref_seqs = pd.DataFrame(ref_protein)\n\n return ref_seqs\n\n\ndef get_indices(df):\n start_index = -1\n end_index = -1\n\n if ('pp1ab' in df['Type'].values):\n row = df.loc[df['Type'] == 'pp1ab'].iloc[0]\n end_index = int(row['End'])\n start_index = end_index - 21289\n elif ('pp1a' in df['Type'].values):\n row = df.loc[df['Type'] == 'pp1a'].iloc[0]\n end_index = int(row['End'])\n start_index = end_index - 13218\n end_index = start_index + 21289\n else:\n logging.info(\"No evidence of pp1ab detected by prodigal\")\n return start_index, end_index\n\n\ndef get_full_dna(start_index: int, end_index: int, seq: str):\n # check if genome has 1 or 2 annotations of pp1ab (prodigal output?)\n # if 1 annotation - match annotation to genome sequence, start index = start of match - 13502, stop index = end of annotated section\n # if 2 annotations - match annotations to genome sequence, order first and second matches, start index = first start, stop_index = second end\n # correctness checks - Find ATG in seq and set to start_index, then find ATG (beg) and TAA(end)\n # pp1ab RNA sequence = genome sequence[start_index : stop_index]\n\n # 0 = no pp1ab found\n seq = seq.upper()\n if (seq[start_index: start_index + 3] != 'ATG') and ('ACT' in seq[start_index: end_index]):\n pos = (seq.index('ACT', start_index, end_index)) - start_index\n start_index = start_index + pos\n\n while seq[start_index: start_index + 3] != 'ATG':\n start_index = start_index - 1\n while seq[end_index - 3: end_index] != 'TAA':\n end_index = end_index + 1\n dna_seq = seq[start_index: end_index]\n\n return dna_seq, start_index, end_index\n\n\ndef find_slippage(seq):\n set_N = set(\n ['AAA', 'BBB', 'CCC', 'DDD', 'GGG', 'HHH', 'KKK', 'MMM', 'NNN', 'RRR', 'SSS', 'TTT', 'UUU', 'VVV', 'WWW',\n 'YYY'])\n set_W = set(['AAA', 'UUU'])\n set_Z = set(['A', 'B', 'C', 'D', 'H', 'K', 'M', 'N', 'R', 'S', 'T', 'U', 'V', 'W', 'Y'])\n\n if (len(seq) < 7):\n return -1\n for i in range(0, len(seq) - 6):\n isSlippage = ((seq[i:i + 3] in set_N) and (seq[i + 3:i + 6] in set_W) and (seq[i + 6] in set_Z))\n if (isSlippage):\n return i\n\n return -1\n\n\ndef slippage_dna(dna_seq: str, pp1a_start: int, 
pp1a_end: int):\n if len(dna_seq) > 13219:\n pp1a_length = (pp1a_end - pp1a_start)\n pp1a_dna = dna_seq[:pp1a_length - 3]\n slippage_index = find_slippage(dna_seq)\n if slippage_index != -1:\n pp1b_dna = dna_seq[:slippage_index] + dna_seq[slippage_index - 1] + dna_seq[slippage_index:]\n pp1b_dna = pp1b_dna[pp1a_length-3:]\n pp1ab_dna = pp1a_dna + pp1b_dna\n return pp1ab_dna\n else:\n logging.info('No Slippage site found')\n\n else:\n logging.info('DNA sequence found is not long enough')\n\ndef translate(dna_seq: str):\n dna_seq = dna_seq.replace(\"T\", \"U\")\n\n if len(dna_seq) > 13219:\n pp1a_dna = dna_seq[:13218]\n pp1a_prot = str(Seq(pp1a_dna).translate())\n\n slippage_index = find_slippage(dna_seq)\n if slippage_index != -1:\n pp1b_dna = dna_seq[:slippage_index] + dna_seq[slippage_index - 1] + dna_seq[slippage_index:]\n pp1b_prot = str(Seq(pp1b_dna).translate(table=1))[4405:]\n\n pp1ab_prot = pp1a_prot + pp1b_prot\n\n return pp1ab_prot\n else:\n logging.info('No Slippage site found')\n else:\n pp1a_prot = str(Seq(dna_seq).translate())\n logging.info('DNA sequence found is not long enough')\n\n return (pp1a_prot)\n \ndef get_prodigal_list_clean(coord_list, start_index, end_index):\n clean_list = []\n for coord in str(coord_list).split('\\n'):\n if '>' in coord:\n temp = str(coord).split('_')\n if not (int(temp[1]) >= start_index and int(temp[2]) <= end_index):\n clean_list.append(coord)\n return clean_list\n\ndef clean_pp1a(start, end, seq):\n seq = seq.upper()\n if (seq[start: start + 3] != 'ATG') and ('ACT' in seq[start: end]):\n pos = (seq.index('ACT', start, end)) - start\n start = start + pos\n\n while seq[start: start + 3] != 'ATG':\n start = start - 1\n while seq[end - 3: end] != 'TAA':\n end = end + 1\n return start, end\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--genome_file',\n dest='genome_file',\n type=str,\n required=True,\n help='Assembled genome file in FASTA format')\n parser.add_argument('--prodigal_potentials',\n dest='prodigal_potentials',\n type=str,\n required=True,\n help='Potential proteins coordinates and scores output by Prodigal')\n parser.add_argument('--accession',\n dest='accession',\n type=str,\n required=True,\n help='Genome accession')\n parser.add_argument('--output_dir',\n dest='output_dir',\n type=str,\n required=False,\n default='./',\n help=\"Base path for output files including log file and any desired protein outputs\")\n parser.add_argument('--prodigal_output',\n dest='prodigal_output',\n type=str,\n required=True,\n help=\"Coordinates from prodigal\")\n\n args = parser.parse_args()\n\n prodigal_output = args.prodigal_output\n\n fna_file_name = args.genome_file\n accession = args.accession\n fna = load_fna(fna_file_name)\n fna_sequence = str(fna.seq)\n\n output_dir = args.output_dir\n\n potentials_file_name = args.prodigal_potentials\n potentials_df = load_potentials(potentials_file_name)\n\n start_index_pot, end_index_pot = get_indices(potentials_df)\n dna_seq, start_idx, end_idx = get_full_dna(start_index_pot, end_index_pot, fna_sequence)\n\n prod_coord_list = get_prodigal_list_clean(prodigal_output, start_idx, end_idx)\n\n pp1a_start, pp1a_end = clean_pp1a(start_idx, start_idx + 13215, fna_sequence)\n dna_corrected = slippage_dna(dna_seq, pp1a_start, pp1a_end)\n\n pp1ab_coord = str(len(prod_coord_list) + 1) + '_' + str(start_idx + 1) + '_' + str(end_idx) + '_+_' + str(dna_corrected)\n pp1a_coord = str(len(prod_coord_list) + 2) + '_' + str(pp1a_start + 1) + '_' + str(pp1a_end) + '_+'\n\n for coord in 
prod_coord_list:\n print(coord.strip('>'))\n\n print(pp1a_coord)\n print(pp1ab_coord)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"IBM/omxware-getting-started","sub_path":"SARS-CoV-2_parser/pp1ab_full_translate.py","file_name":"pp1ab_full_translate.py","file_ext":"py","file_size_in_byte":8340,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} {"seq_id":"4770009528","text":"\"\"\"Script for setting a webhook for a Viber bot.\"\"\"\nimport os\nimport requests\nimport json\nimport logging\nfrom tools import dotenv_definer\n\n# Loading Environment variables\ndotenv_definer()\nURL = os.getenv('URL')\n\nlogger = logging.getLogger()\nlogger.setLevel(os.getenv(\"LOG_LEVEL\"))\n\n# Setting up webhook parameters\nauth_token = os.getenv(\"TOKEN\")\nhook = 'https://chatapi.viber.com/pa/set_webhook'\nheaders = {'X-Viber-Auth-Token': auth_token}\nbody = dict(url=URL,\n event_types=['unsubscribed',\n 'conversation_started',\n 'message',\n 'seen',\n 'delivered'])\n\n# Sending POST request to apply a webhook, and printing results\nr = requests.post(hook, json.dumps(body), headers=headers)\nlogger.info(r.json())\n","repo_name":"kirichk/viber-sushi-bot","sub_path":"utils/set_webhook.py","file_name":"set_webhook.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} {"seq_id":"74859356711","text":"from pathlib import Path\nfrom typing import Sequence, Union\nimport numpy as np\n\nfrom typing import IO\n\n\ndef sentencepiece_load(file):\n \"\"\"Load a SentencePiece model\"\"\"\n from sentencepiece import SentencePieceProcessor\n spm = SentencePieceProcessor()\n spm.Load(str(file))\n return spm\n\n\n# source: https://github.com/allenai/allennlp/blob/master/allennlp/common/file_utils.py#L147 # NOQA\ndef http_get_temp(url: str, temp_file: IO) -> None:\n import requests\n req = requests.get(url, stream=True)\n req.raise_for_status()\n content_length = req.headers.get('Content-Length')\n total = int(content_length) if content_length is not None else None\n try:\n from tqdm import tqdm\n progress = tqdm(unit=\"B\", total=total)\n except ImportError:\n progress = None\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n if progress is not None:\n progress.update(len(chunk))\n temp_file.write(chunk)\n if progress is not None:\n progress.close()\n return req.headers\n\n\n# source: https://github.com/allenai/allennlp/blob/master/allennlp/common/file_utils.py#L147 # NOQA\ndef http_get(url: str, outfile: Path, ignore_tardir=False) -> None:\n import tempfile\n import shutil\n with tempfile.NamedTemporaryFile() as temp_file:\n headers = http_get_temp(url, temp_file)\n # we are copying the file before closing it, flush to avoid truncation\n temp_file.flush()\n # shutil.copyfileobj() starts at current position, so go to the start\n temp_file.seek(0)\n outfile.parent.mkdir(exist_ok=True, parents=True)\n if headers.get(\"Content-Type\") == \"application/x-gzip\":\n import tarfile\n tf = tarfile.open(fileobj=temp_file)\n members = tf.getmembers()\n if len(members) != 1:\n raise NotImplementedError(\"TODO: extract multiple files\")\n member = members[0]\n if ignore_tardir:\n member.name = Path(member.name).name\n tf.extract(member, str(outfile.parent))\n extracted_file = outfile.parent / member.name\n assert extracted_file == outfile, \"{} != {}\".format(\n extracted_file, outfile)\n else:\n with open(str(outfile), 'wb') as out:\n 
shutil.copyfileobj(temp_file, out)\n return outfile\n\n\ndef load_word2vec_file(word2vec_file, add_pad=False, pad=\"<pad>\"):\n \"\"\"Load a word2vec file in either text or bin format.\"\"\"\n from gensim.models import KeyedVectors\n word2vec_file = str(word2vec_file)\n binary = word2vec_file.endswith(\".bin\")\n vecs = KeyedVectors.load_word2vec_format(word2vec_file, binary=binary)\n if add_pad:\n if pad not in vecs:\n add_embeddings(vecs, pad)\n else:\n raise ValueError(\"Attempted to add <pad>, but already present\")\n return vecs\n\n\ndef add_embeddings(keyed_vectors, *words, init=None):\n import numpy as np\n from gensim.models.keyedvectors import Vocab\n if init is None:\n init = np.zeros\n vectors = keyed_vectors.vectors\n for word in words:\n keyed_vectors.vocab[word] = Vocab(count=0, index=vectors.shape[0])\n keyed_vectors.vectors = np.concatenate([\n vectors, init((1, vectors.shape[1]))])\n keyed_vectors.index2word.append(word)\n return vectors.shape[0]\n\n\nclass CORD19Emb():\n \"\"\"\n Provides phrase and subword embeddings, as well as corresponding\n SentencePiece segmentation models, trained on the CORD-19 collection\n of COVID-19 research papers.\n (https://pages.semanticscholar.org/coronavirus-research)\n\n # Examples\n\n Load 100-dim case-sensitive phrase embeddings, trained on the\n 2020-03-20 version of CORD-19 with the SentencePiece 'unigram'\n unsupervised segmentation model and a vocabulary size of 200,000:\n >>> cord19_emb = CORD19Emb('2020-03-20.phrase_emb.cased.unigram.vs200000.d100')\n\n Segment text into subwords, words, and phrases:\n >>> text = 'Faced with the current large-scale public health emergency, collecting, sorting, and analyzing biomedical information related to the \\\"coronavirus\\\" should be done as quickly as possible to gain a global perspective, which is a basic requirement for strengthening epidemic control capacity.'\n >>> cord19_emb.encode(text)\n ['▁faced',\n '▁with▁the▁current',\n '▁large',\n '-',\n 'scale',\n '▁public▁health▁emergency',\n ',',\n '▁collecting',\n ',',\n '▁sorting',\n ',',\n '▁and',\n '▁analyzing',\n '▁biomedical',\n '▁information',\n '▁related▁to▁the',\n '▁\"',\n 'coronavirus',\n '\"',\n '▁should▁be▁done',\n '▁as',\n '▁quickly',\n '▁as▁possible',\n '▁to▁gain',\n '▁a▁global',\n '▁perspective',\n ',',\n '▁which▁is▁a',\n '▁basic',\n '▁requirement▁for',\n '▁strengthening',\n '▁epidemic',\n '▁control',\n '▁capacity',\n '.']\n\n The segmentation is unsupervised and purely based on frequency. 
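No dictionary or linguistic rule is involved; the SentencePiece vocabulary is learned directly from the CORD-19 corpus, so whichever character sequences occur often enough become single symbols.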
Only\n frequent phrases are segmented as such, while less frequent phrases like\n 'epidemic control capacity' are split into words.\n\n When using phrase embeddings ('phrase_emb' in model name), you can\n query similar phrases:\n >>> cord19_emb.most_similar('▁public▁health▁emergency')\n [('▁a▁public▁health▁emergency', 0.6968450546264648),\n ('▁health▁emergency', 0.6321637630462646),\n ('▁preparedness▁and▁response', 0.6275159120559692),\n ('▁public▁health▁emergencies', 0.6274486780166626),\n ('▁in▁the▁event▁of▁a', 0.6201773285865784),\n ('▁emergency▁response', 0.6147885918617249),\n ('▁infectious▁disease▁outbreak', 0.5898448824882507),\n ('▁response▁plan', 0.5878373384475708),\n ('▁disease▁outbreak', 0.5794155597686768),\n ('▁an▁emergency', 0.5759837627410889)]\n\n Another example:\n >>> cord19_emb.most_similar('▁infection▁control▁measures')\n [('▁infection▁control▁practices', 0.8876594305038452),\n ('▁infection▁control▁precautions', 0.8097165822982788),\n ('▁infection▁control', 0.7622669339179993),\n ('▁infection▁control▁procedures', 0.7612308263778687),\n ('▁infection▁prevention▁and▁control', 0.7595920562744141),\n ('▁prevention▁and▁control▁measures', 0.7420486211776733),\n ('▁adherence▁to', 0.7354915142059326),\n ('▁public▁health▁measures', 0.7327624559402466),\n ('▁control▁measures', 0.730279803276062),\n ('▁biosecurity▁measures', 0.728427529335022)]\n\n When using subword embeddings ('subword_emb' in model name),\n you can only query subwords or words:\n >>> cord19_emb.most_similar('▁coronavirus')\n [('▁coronaviruses', 0.8036941289901733),\n ('▁CoV', 0.8004533052444458),\n ('▁Coronavirus', 0.7854971885681152),\n ('▁HCoV', 0.7079704999923706),\n ('CoV', 0.6921805143356323),\n ('▁SARS▁coronavirus', 0.687605082988739),\n ('▁CoVs', 0.6866652965545654),\n ('▁calicivirus', 0.667205274105072),\n ('▁human▁coronavirus', 0.6547725200653076),\n ('▁rotavirus', 0.6540536880493164)]\n\n Byte-pair encode text into IDs for performing an embedding lookup:\n >>> ids = cord19_emb.encode_ids(text)\n >>> ids\n [12910, 62444, 373, 5, 1818, 41265, 3, 9034, ...]\n\n Byte-pair encode and embed text:\n >>> cord19_emb.embed(text).shape\n (35, 100)\n\n Decode byte-pair-encoded text:\n >>> cord19_emb.decode(['▁infection▁control▁measures', '▁were▁effective'])\n 'infection control measures were effective'\n\n Decode byte-pair IDs:\n >>> cord19_emb.decode_ids([14565, 61440])\n 'infection control measures were effective'\n\n\n Parameters\n ----------\n\n model_name: ``str``, required\n Name of a segmentation and embedding model, e.g.:\n '2020-03-20.phrase_emb.uncased.unigram.vs200000.d100'\n cache_dir: ``Path'', optional (default = ``~/.cache/cord19_emb'')\n The folder in which downloaded model files will be cached.\n preprocess: ``bool'', optional (default = True)\n Whether to preprocess the text or not.\n Set to False if you have preprocessed the text already.\n encode_extra_options: ``str'' (default = None)\n Options that are directly passed to the SentencePiece encoder.\n See SentencePiece documentation for details.\n add_pad_emb: ``bool'', optional (default = False)\n Whether to add a special <pad> embedding to the byte pair\n embeddings, thereby increasing the vocabulary size to vs + 1.\n This embedding is initialized with zeros and appended to the end\n of the embedding matrix. 
Assuming \"cord19_emb\" is a model instance,\n the padding embedding can be looked up with \"cord19_emb['']\", or\n directly accessed with \"cord19_emb.vectors[-1]\".\n \"\"\"\n url_tpl = \"https://github.com/cl-tohoku/CORD-19_phrase_embeddings_files/blob/master/{file_name}?raw=true\"\n emb_tpl = \"CORD-19.{model_name}.w2v.bin\"\n model_tpl = \"CORD-19.{model_name}.model\"\n archive_suffix = \".tar.gz\"\n available_models = None\n\n def __init__(\n self,\n model_name,\n *,\n cache_dir: Path = Path.home() / Path(\".cache/cord19_emb\"),\n preprocess: bool = True,\n encode_extra_options: str = None,\n add_pad_emb: bool = False,\n segmenter_only: bool = False,\n ):\n self.model_name = model_name\n parts = model_name.split('.')\n assert len(parts) == 6, 'invalid model name: ' + model_name\n self.vocab_size = self.vs = int(parts[4][len('vs'):])\n self.dim = int(parts[5][len('d'):])\n self.cache_dir = Path(cache_dir)\n model_name_without_dim = '.'.join(parts[:-1])\n model_file = self.model_tpl.format(\n model_name=model_name_without_dim)\n self.model_file = self._load_file(model_file)\n self.spm = sentencepiece_load(self.model_file)\n if encode_extra_options:\n self.spm.SetEncodeExtraOptions(encode_extra_options)\n emb_file = self.emb_tpl.format(model_name=model_name)\n if not segmenter_only:\n self.emb_file = self._load_file(emb_file, archive=True)\n self.emb = load_word2vec_file(self.emb_file, add_pad=add_pad_emb)\n self.most_similar = self.emb.most_similar\n assert self.dim == self.emb.vectors.shape[1]\n self.do_preproc = preprocess and not '.cased.' in model_name\n self.BOS_str = \"\"\n self.EOS_str = \"\"\n self.BOS = self.spm.PieceToId(self.BOS_str)\n self.EOS = self.spm.PieceToId(self.EOS_str)\n\n def __getitem__(self, key):\n return self.emb.__getitem__(key)\n\n @property\n def vectors(self):\n return self.emb.vectors\n\n def _load_file(self, file, archive=False, cache_dir=None):\n if not cache_dir:\n if hasattr(self, \"cache_dir\"):\n cache_dir = self.cache_dir\n else:\n from tempfile import mkdtemp\n cache_dir = mkdtemp()\n cached_file = Path(cache_dir) / file\n if cached_file.exists():\n return cached_file\n suffix = self.archive_suffix if archive else \"\"\n file_url = self.url_tpl.format(file_name=file)\n print(\"downloading\", file_url)\n return http_get(file_url, cached_file, ignore_tardir=True)\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n \"(model_name={}, vs={}, dim={})\".format(self.model_name, self.vocab_size, self.dim)\n\n def encode(\n self,\n texts: Union[str, Sequence[str]]\n ) -> Union[Sequence[str], Sequence[Sequence[str]]]:\n \"\"\"Encode the supplied texts into byte-pair symbols.\n\n Parameters\n ----------\n texts: ``Union[str, Sequence[str]]'', required\n The text or texts to be encoded.\n\n Returns\n -------\n The byte-pair-encoded text.\n \"\"\"\n return self._encode(texts, self.spm.EncodeAsPieces)\n\n def encode_ids(\n self,\n texts: Union[str, Sequence[str]]\n ) -> Union[Sequence[str], Sequence[Sequence[str]]]:\n \"\"\"Encode the supplied texts into byte-pair IDs.\n The byte-pair IDs correspond to row-indices into the embedding\n matrix.\n\n Parameters\n ----------\n texts: ``Union[str, Sequence[str]]'', required\n The text or texts to be encoded.\n\n Returns\n -------\n The byte-pair-encoded text.\n \"\"\"\n return self._encode(texts, self.spm.EncodeAsIds)\n\n def encode_with_eos(\n self,\n texts: Union[str, Sequence[str]]\n ) -> Union[Sequence[str], Sequence[Sequence[str]]]:\n \"\"\"Encode the supplied texts into byte-pair symbols, adding\n an 
end-of-sentence symbol at the end of each encoded text.\n\n Parameters\n ----------\n texts: ``Union[str, Sequence[str]]'', required\n The text or texts to be encoded.\n\n Returns\n -------\n The byte-pair-encoded text.\n \"\"\"\n return self._encode(\n texts,\n lambda t: self.spm.EncodeAsPieces(t) + [self.EOS_str])\n\n def encode_ids_with_eos(\n self,\n texts: Union[str, Sequence[str]]\n ) -> Union[Sequence[str], Sequence[Sequence[str]]]:\n \"\"\"Encode the supplied texts into byte-pair IDs, adding\n an end-of-sentence symbol at the end of each encoded text.\n The byte-pair IDs correspond to row-indices into the embedding\n matrix.\n\n Parameters\n ----------\n texts: ``Union[str, Sequence[str]]'', required\n The text or texts to be encoded.\n\n Returns\n -------\n The byte-pair-encoded text.\n \"\"\"\n return self._encode(\n texts,\n lambda t: self.spm.EncodeAsIds(t) + [self.EOS])\n\n def encode_with_bos_eos(\n self,\n texts: Union[str, Sequence[str]]\n ) -> Union[Sequence[str], Sequence[Sequence[str]]]:\n \"\"\"Encode the supplied texts into byte-pair symbols, adding\n a begin-of-sentence and an end-of-sentence symbol at the\n begin and end of each encoded text.\n\n Parameters\n ----------\n texts: ``Union[str, Sequence[str]]'', required\n The text or texts to be encoded.\n\n Returns\n -------\n The byte-pair-encoded text.\n \"\"\"\n return self._encode(\n texts,\n lambda t: (\n [self.BOS_str] + self.spm.EncodeAsPieces(t) + [self.EOS_str]))\n\n def encode_ids_with_bos_eos(\n self,\n texts: Union[str, Sequence[str]]\n ) -> Union[Sequence[str], Sequence[Sequence[str]]]:\n \"\"\"Encode the supplied texts into byte-pair IDs, adding\n a begin-of-sentence and an end-of-sentence symbol at the\n begin and end of each encoded text.\n\n Parameters\n ----------\n texts: ``Union[str, Sequence[str]]'', required\n The text or texts to be encoded.\n\n Returns\n -------\n The byte-pair-encoded text.\n \"\"\"\n return self._encode(\n texts,\n lambda t: [self.BOS] + self.spm.EncodeAsIds(t) + [self.EOS])\n\n def _encode(self, texts, fn):\n if isinstance(texts, str):\n if self.do_preproc:\n texts = self.preprocess(texts)\n return fn(texts)\n if self.do_preproc:\n texts = map(self.preprocess, texts)\n return list(map(fn, texts))\n\n def embed(self, text: str) -> np.ndarray:\n \"\"\"Byte-pair encode text and return the corresponding byte-pair\n embeddings.\n\n Parameters\n ----------\n text: ``str'', required\n The text to encode and embed.\n\n Returns\n -------\n A matrix of shape (l, d), where l is the length of the byte-pair\n encoded text and d the embedding dimension.\n \"\"\"\n ids = self.encode_ids(text)\n return self.emb.vectors[ids]\n\n def decode(\n self,\n pieces: Union[Sequence[str], Sequence[Sequence[str]]]\n ) -> Union[str, Sequence[str]]:\n \"\"\"\n Decode the supplied byte-pair symbols.\n\n Parameters\n ----------\n pieces: ``Union[Sequence[str], Sequence[Sequence[str]]]'', required\n The byte-pair symbols to be decoded.\n\n Returns\n -------\n The decoded byte-pair symbols.\n \"\"\"\n if isinstance(pieces[0], str):\n return self.spm.DecodePieces(pieces)\n return list(map(self.spm.DecodePieces, pieces))\n\n def decode_ids(self, ids):\n \"\"\"\n Decode the supplied byte-pair IDs.\n\n Parameters\n ----------\n ids: ``Union[Sequence[int], Sequence[Sequence[int]]]'', required\n The byte-pair symbols to be decoded.\n\n Returns\n -------\n The decoded byte-pair IDs.\n \"\"\"\n try:\n # try to decode list of lists\n return list(map(self.spm.DecodeIds, ids))\n except TypeError:\n try:\n # try to 
decode array\n return self.spm.DecodeIds(ids.tolist())\n except AttributeError:\n try:\n # try to decode list of arrays\n return list(map(self.spm.DecodeIds, ids.tolist()))\n except AttributeError:\n # try to decode list\n return self.spm.DecodeIds(ids)\n\n @staticmethod\n def preprocess(text: str) -> str:\n \"\"\"\n Lowercase text when using 'uncased' models.\n\n Parameters\n ----------\n text: ``str'', required\n The text to be preprocessed.\n\n Returns\n -------\n The preprocessed text.\n \"\"\"\n return text.lower()\n\n @property\n def pieces(self):\n return self.emb.index2word\n\n @property\n def words(self):\n return self.pieces\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # the SentencePiece instance is not serializable since it is a\n # SWIG object, so we need to delete it before serializing\n state['spm'] = None\n return state\n\n def __setstate__(self, state):\n # load SentencePiece after the object has been unpickled\n model_file = (\n state[\"cache_dir\"] / state['model_file'].name)\n if not model_file.exists():\n model_rel_path = model_file.name\n model_file = self._load_file(\n str(model_rel_path), cache_dir=state[\"cache_dir\"])\n state['spm'] = sentencepiece_load(model_file)\n self.__dict__ = state\n\n\n__all__ = [\"CORD19Emb\"]\n","repo_name":"cl-tohoku/CORD-19_phrase_embeddings","sub_path":"cord19_emb/cord19_emb.py","file_name":"cord19_emb.py","file_ext":"py","file_size_in_byte":18987,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} {"seq_id":"24880263825","text":"\"\"\"\nUbelt Use Cases\n\nEach use case starts with a motivation and follows with a solution. This makes\nthese cases perfect for presentations.\n\"\"\"\n\n\ndef multiple_items_from_a_dictionary():\n \"\"\"\n Spotlight:\n ubelt.take\n\n Motivation:\n Working with Lists of Dictionaries\n\n Requires:\n kwimage\n \"\"\"\n ...\n\n \"\"\"\n When working with data, a common pattern is to iterate through it, and\n gather information about the work to be done, so you can make a final\n structured pass through the data.\n\n In Python we might do this by initializing an empty list and appending\n a **dictionary of information** to the list. (or you might yield\n dictionaries of information from a generator instead, either way you have a\n flat list).\n\n Some people might use lists of tuples instead of Lists of dictionaries, but\n using dictionaries makes it easy to add new information later (and it works\n very well with pandas).\n \"\"\"\n import ubelt as ub\n import kwimage\n kwimage_test_image_names = ['airport', 'amazon', 'astro', 'carl',\n 'lowcontrast']\n rows = []\n for test_image in kwimage_test_image_names:\n fpath = ub.Path(kwimage.grab_test_image_fpath(test_image))\n imdata = kwimage.imread(fpath)\n row = {\n 'mean': imdata.mean(),\n 'std': imdata.std(),\n 'sum': imdata.sum(),\n 'max': imdata.max(),\n 'min': imdata.min(),\n }\n rows.append(row)\n\n \"\"\"\n For each row, you might want to grab multiple specific items from it.\n\n But having a separate assignment on each row wastes a lot of vertical\n space.\n \"\"\"\n\n for row in rows:\n mean = row['mean']\n std = row['std']\n sum = row['sum']\n min = row['min']\n max = row['max']\n\n \"\"\"\n You might put them one line explicitly, but that wastes a lot of horizontal\n space\n \"\"\"\n for row in rows:\n mean, std, sum, min, max = row['mean'], row['std'], row['sum'], row['min'], row['max']\n\n \"\"\"\n What if we try to be clever? 
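Rather than naming each key twice, we can drive the unpacking from a single list of keys.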
We can use a list comprehension\n \"\"\"\n for row in rows:\n mean, std, sum, min, max = [row[k] for k in ['mean', 'std', 'sum', 'min', 'max']]\n\n \"\"\"\n That's not too bad, but we can do better\n \"\"\"\n for row in rows:\n mean, std, sum, min, max = ub.take(row, ['mean', 'std', 'sum', 'min', 'max'])\n\n \"\"\"\n And now even better:\n \"\"\"\n for row in map(ub.udict, rows):\n mean, std, sum, min, max = row.take(['mean', 'std', 'sum', 'min', 'max'])\n","repo_name":"Erotemic/ubelt","sub_path":"dev/examples/use_cases.py","file_name":"use_cases.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":694,"dataset":"github-code","pt":"72"} {"seq_id":"23858260285","text":"import pyrealsense2 as rs\nimport numpy as np\nimport cv2\nimport time\nimport os\nfrom datetime import datetime as dt\n\nWIDTH = 1280\nHEIGHT = 720\nFPS = 6\n\nalign = rs.align(rs.stream.color)\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_stream(rs.stream.color, WIDTH, HEIGHT, rs.format.bgr8, FPS)\nconfig.enable_stream(rs.stream.depth, WIDTH, HEIGHT, rs.format.z16, FPS)\n\nnow = dt.now()\nnowstr = now.strftime('%Y-%m-%d-%H')\nfolder = '/home/pi/data'\nos.makedirs(folder, exist_ok=True)\nfile_name = '/home/pi/data/d435data.bag'\nconfig.enable_record_to_file(file_name)\nprof = pipeline.start(config)\n#dev = prof.get_device().as_recorder()\n\ntry:\n time0 = time.time()\n time_delay = 60\n while True:\n frames = pipeline.wait_for_frames()\n color_frame = frames.get_color_frame()\n depth_frame = frames.get_depth_frame()\n if not color_frame or not depth_frame:\n continue\n '''\n color_image = np.asanyarray(color_frame.get_data())\n cv2.namedWindow('confirm_video_window', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('confirm_video_window', color_image)\n c = cv2.waitKey(1)\n\n if cv2.getWindowProperty('confirm_video_window', cv2.WND_PROP_AUTOSIZE) < 1:\n break\n if c == 27:\n cv2.destroyAllWindows()\n break\n \n #dev.pause()\n #time.sleep(0.6)\n #dev.resume()\n '''\n if (time.time() - time0) >= time_delay:\n print(\"---------------------finish!----------------------------\")\n break\n\nfinally:\n # Stop streaming\n #time.sleep(5)\n pipeline.stop()\n","repo_name":"MyDuan/computer_version_using_realsense","sub_path":"save_data_with_depth.py","file_name":"save_data_with_depth.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"18242893266","text":"import environment\nfrom keras.models import Model\nfrom keras.layers import Input, Flatten, Dense, Concatenate\nfrom rl.callbacks import ModelIntervalCheckpoint\nfrom tensorflow.keras.optimizers import Adam\nfrom rl.agents import DDPGAgent, DQNAgent\nfrom rl.memory import SequentialMemory\nfrom rl.random import OrnsteinUhlenbeckProcess\nfrom rl.policy import BoltzmannQPolicy\nfrom rl.callbacks import Callback as KerasCallback\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport os\nfrom typing import Tuple, Optional\nfrom keras.utils.io_utils import path_to_string\n\n\nclass Callback(KerasCallback):\n model: DQNAgent\n\n def __init__(\n self, filepath, monitor=\"episode_reward\", verbose=0, save_best_only=True\n ):\n super().__init__()\n\n self.monitor = monitor\n self.verbose = verbose\n self.filepath = path_to_string(filepath)\n self.save_best_only = save_best_only\n\n self.best: float = -np.Inf\n self.total_steps: int = 0\n self.total_episodes: int = 0\n\n def _set_env(self, env):\n self.env = 
env\n\n def on_action_end(self, action, logs={}):\n pass\n\n def on_step_end(self, steps, logs={}):\n self.total_steps += 1\n\n def on_episode_end(self, episode, logs={}):\n \"\"\"Called at end of each episode\"\"\"\n self.total_episodes += 1\n self._save_model(episode=episode, logs=logs)\n\n def _save_model(self, episode, logs: dict):\n filepath = self.filepath.format(step=self.total_steps, episode=episode, **logs)\n if self.save_best_only:\n current = logs.get(self.monitor)\n if current is None:\n print(f\"{self.monitor} is not included in the logs\")\n print(logs.keys())\n else:\n if self.best < current:\n if self.verbose > 0:\n print(\n f\"\\nepisode {episode}: {self.monitor} improved from {self.best:.5f} to {current:.5f}, saving model to {filepath}\"\n )\n self.best = current\n self.model.save_weights(filepath, overwrite=True)\n else:\n if self.verbose > 1:\n print(\n f\"\\nyour latest score {current:.5f} is lower than {self.best} at {self.monitor}. See you next time.\"\n )\n else:\n if self.verbose > 0:\n print(f\"\\nepisode {episode}: saving model to {filepath}\")\n self.model.save_weights(filepath, overwrite=True)\n\n\nclass DQNRunner:\n window_length = 1\n\n def __init__(\n self, folder_path: Optional[str] = None, file_path: Optional[str] = None\n ):\n if folder_path is None:\n assert file_path is not None\n self.file_path = file_path\n self.folder_path = os.path.dirname(self.file_path) + os.sep\n else:\n self.folder_path = folder_path\n\n self.env = environment.UnicycleEnv()\n self.observation_shape: Tuple = (self.window_length,) + self.env.observation_space.shape # type: ignore\n self.nb_actions: int = self.env.action_space.n # type: ignore\n\n self._prepare()\n\n def fit(self, nb_steps: int, nb_max_episode_steps: int = 5000):\n if not os.path.exists(self.folder_path):\n os.mkdir(self.folder_path)\n if self.folder_path[-1] != os.sep:\n self.folder_path += os.sep\n\n callback = Callback(self.folder_path + \"weight_{episode:06d}.h5\", verbose=1)\n histories = self.dqn.fit(\n self.env,\n nb_steps=nb_steps,\n nb_max_episode_steps=nb_max_episode_steps,\n verbose=1,\n callbacks=[callback],\n )\n\n self.dqn.save_weights(self.folder_path + \"weight_last.h5f\", overwrite=True)\n\n self._plot(histories)\n\n def test(self, nb_episodes=1):\n self.dqn.load_weights(self.file_path)\n self.env.start_recording()\n history = self.dqn.test(\n self.env, nb_episodes=nb_episodes, verbose=0, visualize=False\n )\n self.env.save_recording(self.folder_path + \"result.gif\", time_scale=1)\n self._plot(history, \"test\")\n\n def _create_model(self, neuron_num=16, layer_num=3, activation=\"relu\"):\n input_layer = Input(shape=self.observation_shape)\n c = Flatten()(input_layer)\n for _ in range(layer_num):\n c = Dense(neuron_num, activation=activation)(c)\n c = Dense(self.nb_actions, activation=\"linear\")(c)\n model = Model(input_layer, c)\n print(model.summary())\n return model\n\n def _prepare(self):\n memory = SequentialMemory(limit=10000, window_length=self.window_length)\n policy = BoltzmannQPolicy()\n self.dqn = DQNAgent(\n model=self._create_model(),\n nb_actions=self.nb_actions,\n memory=memory,\n nb_steps_warmup=50,\n target_model_update=1e-2,\n policy=policy,\n )\n self.dqn.compile(Adam(lr=1e-3), metrics=[\"mae\"])\n\n def _plot(self, histories, filename=\"result\"):\n histories = histories.history\n with open(self.folder_path + filename + \".txt\", \"w\") as f:\n f.write(\"nb_step episode_reward\\n\")\n for i in range(len(histories[\"episode_reward\"])):\n string = f'{histories[\"nb_steps\"][i]} 
{histories[\"episode_reward\"][i]}'\n f.write(string + \"\\n\")\n print(string)\n plt.plot(\n np.arange(len(histories[\"episode_reward\"])), histories[\"episode_reward\"]\n )\n plt.xlabel(\"episode\")\n plt.ylabel(\"reward\")\n plt.savefig(self.folder_path + filename + \".png\")\n plt.show()\n\n\nclass Runner:\n window_length = 1\n\n def __init__(self, use_gui=True, time_wait=None, record=False):\n self.env = environment.UnicycleEnv(\n visualize=use_gui, time_wait=time_wait, record=record\n )\n self.observation_shape = (self.window_length,) + self.env.observation_space.shape # type: ignore\n self.nb_actions: int = self.env.action_space.shape[0] # type: ignore\n\n def _get_actor_model(self):\n neuron_num = 100\n input_layer = Input(shape=self.observation_shape) # type: ignore\n c = Flatten()(input_layer)\n c = Dense(neuron_num, activation=\"tanh\")(c)\n for _ in range(6):\n c = Dense(neuron_num, activation=\"relu\")(c)\n c = Dense(self.nb_actions, activation=\"tanh\")(c) # 出力は連続値なので、linearを使う\n model = Model(input_layer, c)\n print(model.summary())\n return model\n\n def _get_critic_model(self):\n neuron_num = 200\n action_input = Input(shape=(self.nb_actions,), name=\"action_input\")\n observation_input = Input(\n shape=self.observation_shape, name=\"observation_input\"\n )\n flattened_observation = Flatten()(observation_input)\n\n input_laer = Concatenate()([action_input, flattened_observation])\n c = Dense(neuron_num, activation=\"swish\")(input_laer)\n for _ in range(6):\n c = Dense(neuron_num, activation=\"relu\")(c)\n c = Dense(1, activation=\"linear\")(c)\n critic = Model(inputs=[action_input, observation_input], outputs=c)\n print(critic.summary())\n return critic, action_input\n\n def prepare(self):\n # Experience Bufferの設定\n memory = SequentialMemory(limit=50000, window_length=self.window_length)\n # 行動選択時に加えるノイズ(探索のため)\n # 平均回帰課程を用いている。単純にノイズがmuに収束している\n # ここでは、mu=0に設定しているので、ノイズは0になっていく。つまり、探索を行わなくなる。\n random_process = OrnsteinUhlenbeckProcess(\n size=self.nb_actions, theta=0.15, mu=0.0, sigma=0.3\n )\n\n actor = self._get_actor_model()\n critic, action_input = self._get_critic_model()\n\n self.agent = DDPGAgent(\n nb_actions=self.nb_actions,\n actor=actor,\n critic=critic,\n critic_action_input=action_input,\n memory=memory,\n nb_steps_warmup_actor=100,\n nb_steps_warmup_critic=100,\n random_process=random_process,\n gamma=0.99,\n target_model_update=1e-3,\n )\n\n # agent.compile(Adam(lr=.0001, clipnorm=1.), metrics=['mae'])\n self.agent.compile(Adam())\n\n def learn(self, folder_name: str, step_per_episode: int, nb_steps: int):\n if folder_name[-1] != os.sep:\n folder_name = folder_name + os.sep\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n print(\"saving to\", folder_name)\n\n callback = ModelIntervalCheckpoint(\n folder_name + \"checkpoint_{step:010d}_{reward:2.2f}.h5\",\n interval=1000,\n verbose=0,\n )\n\n histories = self.agent.fit(\n self.env,\n nb_steps=nb_steps,\n visualize=False,\n verbose=1,\n nb_max_episode_steps=step_per_episode,\n callbacks=[callback],\n )\n\n self.agent.save_weights(folder_name + \"ddpg_weights\", overwrite=True)\n\n histories = histories.history\n print(histories)\n plt.plot(\n np.arange(len(histories[\"episode_reward\"])), histories[\"episode_reward\"]\n )\n plt.xlabel(\"step\")\n plt.ylabel(\"reward\")\n plt.savefig(folder_name + \"result.png\")\n plt.show()\n\n with open(folder_name + \"result.txt\", \"w\") as f:\n for i in range(len(histories[\"episode_reward\"])):\n string = f'{histories[\"nb_steps\"][i]} 
{histories[\"episode_reward\"][i]}'\n f.write(string + \"\\n\")\n print(string)\n\n def trial(self):\n result = self.agent.test(self.env, nb_episodes=1, visualize=False)\n print(result)\n\n def save_gif(self, gif_path):\n if gif_path[-1] != os.sep:\n gif_path = gif_path + os.sep\n if not os.path.exists(gif_path):\n os.mkdir(gif_path)\n print(\"saving to\", gif_path + \"result.gif\")\n self.env.save_img(gif_path + \"result.gif\")\n\n def load_weights(self, filepath):\n self.agent.load_weights(filepath)\n\n\nclass Tester:\n def __init__(self):\n self.env = environment.UnicycleEnv()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--learn\", help=\"重みなどを保存するフォルダ名\")\n # parser.add_argument(\n # \"--step-per-episode\", help=\"エピソードあたりのステップ\", default=500, type=int\n # )\n parser.add_argument(\"--nb-steps\", help=\"総ステップ数\", default=100000, type=int)\n # parser.add_argument(\"--no-gui\", help=\"GUIを表示するか\", default=False)\n parser.add_argument(\"--test\", help=\"重みをロードするファイル名\")\n # parser.add_argument(\"--save-gif\", help=\"gifファイルを保存するフォルダ名\", default=None)\n\n args = parser.parse_args()\n\n if args.learn is None and args.test is None:\n args.learn = \"new\"\n\n runner = DQNRunner(folder_path=args.learn, file_path=args.test)\n if args.learn is not None:\n runner.fit(args.nb_steps)\n else:\n runner.test()\n return\n\n no_gui = args.no_gui == False\n record = args.save_gif is not None\n if args.trial is None:\n runner = Runner(use_gui=no_gui)\n runner.prepare()\n runner.learn(args.learn, args.step_per_episode, args.nb_steps)\n else:\n runner = Runner(time_wait=0.1, use_gui=no_gui, record=record)\n runner.prepare()\n runner.load_weights(args.trial)\n runner.trial()\n if record == True:\n runner.save_gif(args.save_gif)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"2lu3/rl-unicycle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6464588388","text":"import nltk\nimport numpy as np\nimport pandas as pd\nfrom nltk import pos_tag\nfrom nltk.corpus import wordnet, stopwords\nfrom nltk.stem import snowball, WordNetLemmatizer\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.metrics import classification_report, confusion_matrix, f1_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\n\n#nltk.download('stopwords')\n#nltk.download('punkt')\n\ndef plot_coefficients(classifier_coefs, feature_names, top_features=20, show_neg = True):\n #coef = classifier.coef_.ravel()\n coef = classifier_coefs.ravel()\n top_positive_coefficients = np.argsort(coef)[-top_features:]\n top_negative_coefficients = np.argsort(coef)[:top_features]\n # create plot\n plt.figure(figsize=(15, 5))\n if show_neg:\n top_coefficients = np.hstack([top_negative_coefficients, top_positive_coefficients])\n colors = ['red' if c < 0 else 'blue' for c in coef[top_coefficients]]\n plt.bar(np.arange(2 * top_features), coef[top_coefficients], color=colors)\n feature_names = np.array(feature_names)\n plt.xticks(np.arange(1, 1 + 2 * top_features), feature_names[top_coefficients], rotation=60, ha='right')\n else:\n top_coefficients = top_positive_coefficients\n plt.bar(np.arange(top_features), 
coef[top_coefficients])\n feature_names = np.array(feature_names)\n plt.xticks(np.arange(top_features), feature_names[top_coefficients], rotation=60, ha='right')\n\n plt.show()\n\ndef stem_tokenizer(doc):\n stop_words = set(stopwords.words('english'))\n tokens = word_tokenize(doc)\n stemmer = snowball.SnowballStemmer(\"english\", ignore_stopwords=True)\n stemmed_tokens = [stemmer.stem(word) for word in tokens]\n list_tokens = [tok.lower() for tok in stemmed_tokens if tok.isalpha()]\n return(' '.join(list_tokens))\n\ndef load_data(file_names = [\"data/Isla Vista - All Excerpts - 1_2_2019.xlsx\",\n \"data/Marysville - All Excerpts - Final - 1_2_2019.xlsx\",\n \"data/Newtown - All Excerpts - 1-2-2019.xlsx\",\n \"data/Charleston - All Excerpts - 7_15_2019.xlsx\",\n \"data/Orlando - All Excerpts - 7_15_2019.xlsx\",\n \"data/San Bernardino - All Excerpts - 7_15_2019.xlsx\",\n \"data/Vegas - All Excerpts - 7_15_2019.xlsx\"]):\n\n\n excerpts = []\n account_labels = []\n original_file = []\n docid = []\n for file_name in file_names:\n if 'xlsx' in file_name:\n data = pd.read_excel(file_name, sheet_name='Dedoose Excerpts Export')\n else:\n data = pd.read_csv(file_name, encoding = 'utf-8')\n data = data.dropna(axis=0)\n #ex_col = list(data.columns)[1]\n ex_col = [colname if \"Excerpt\" in colname else \"\" for colname in data.columns]\n ex_col = \"\".join(ex_col)\n if ex_col == \"\":\n ex_col = \"Sentences\"\n #print(\"Excerpt column: \"+str(ex_col))\n excerpts.extend(list(data[ex_col]))\n labels = [1 if label ==\"True\" else label for label in data['ACCOUNT']]\n labels = [0 if label ==\"False\" else label for label in labels]\n labels = [int(label) for label in labels]\n account_labels.extend(labels)\n original_file.extend([file_name]*len(data[ex_col]))\n docid.extend(list(data['StoryID']))\n\n output_data = {'file':original_file, 'StoryID': docid, 'Excerpts': excerpts, 'ACCOUNT': account_labels}\n return pd.DataFrame(output_data)\n\ndef find_best_classifier(file_names = [\"Isla Vista - All Excerpts - 1_2_2019.xlsx\",\n \"Marysville - All Excerpts - Final - 1_2_2019.xlsx\",\n \"Newtown - All Excerpts - 1-2-2019.xlsx\"]):\n\n excerpts = []\n account_labels = []\n for file_name in file_names:\n if 'xlsx' in file_name:\n data = pd.read_excel(file_name, sheet_name='Dedoose Excerpts Export')\n else:\n data = pd.read_csv(file_name, encoding = 'utf-8')\n\n data = data.dropna(axis=0)\n ex_col = [colname if \"Excerpts\" in colname else \"\" for colname in data.columns]\n ex_col = \"\".join(ex_col)\n if ex_col == \"\":\n ex_col = \"Sentences\"\n\n excerpts.extend(list(data[ex_col]))\n labels = [1 if label ==\"True\" else label for label in data['ACCOUNT']]\n labels = [0 if label ==\"False\" else label for label in labels]\n labels = [int(label) for label in labels]\n account_labels.extend(labels)\n\n\n # stem + count\n docs = [stem_tokenizer(doc) for doc in excerpts]\n count_vectorizer = CountVectorizer(max_features=1000, min_df=10, max_df=0.7,\n stop_words=stopwords.words('english'))\n stem_count_X = count_vectorizer.fit_transform(docs).toarray()\n\n # stem + tfidf\n docs = [stem_tokenizer(doc) for doc in excerpts]\n tfidf_vectorizer = TfidfVectorizer(max_features=1000, min_df=10, max_df=0.7,\n stop_words=stopwords.words('english'))\n stem_tfidf_X = tfidf_vectorizer.fit_transform(docs).toarray()\n\n doc_vectors = {'vectorizers':[count_vectorizer, tfidf_vectorizer],\n 'vectors':[stem_count_X, stem_tfidf_X],\n 'type':[\"count vector\", \"tfidf vector\"]}\n\n #Create a svm Classifier\n clf = 
svm.SVC(kernel='linear', class_weight = 'balanced') # Linear Kernel\n logreg = LogisticRegression(solver='lbfgs', max_iter=1000, class_weight='balanced')\n rf = RandomForestClassifier(n_estimators=500, random_state=0, class_weight=\"balanced\")\n\n classifiers = [clf, logreg, rf]\n classifier_f1s = []\n classifier_best_vecs = []\n classifier_best_vectorizer = []\n\n for classi in classifiers:\n f1 = []\n y_preds = []\n print(\"Processing classifier: \"+str(type(classi).__name__))\n for doc_vector in doc_vectors['vectors']:\n docs_train, docs_test, y_train, y_test \\\n = train_test_split(pd.DataFrame(doc_vector), account_labels,\n test_size=0.25, random_state=50)\n\n classi.fit(docs_train, y_train)\n y_pred = classi.predict(docs_test)\n try:\n f1s = f1_score(y_test,y_pred)\n except Exception:\n return y_test, y_pred\n f1.append(f1s)\n y_preds.append(y_pred)\n\n print(\"Classifier: \"+str(type(classi).__name__)+\" f1: \"+str(f1))\n\n best_vec = np.argmax(f1)\n print(doc_vectors['type'][best_vec]+\" \"+str(type(classi).__name__)+\" results:\")\n print(confusion_matrix(y_test,y_preds[best_vec]))\n print(classification_report(y_test,y_preds[best_vec]))\n classifier_f1s.append(max(f1))\n classifier_best_vecs.append(doc_vectors['vectors'][best_vec])\n classifier_best_vectorizer.append(doc_vectors['vectorizers'][best_vec])\n\n best_classi_id = np.argmax(classifier_f1s)\n best_classi = classifiers[best_classi_id]\n best_vecs = classifier_best_vecs[best_classi_id]\n best_vectorizer = classifier_best_vectorizer[best_classi_id]\n\n docs_train, docs_test, y_train, y_test \\\n = train_test_split(pd.DataFrame(best_vecs), account_labels,\n test_size=0.2, random_state=0)\n\n best_classi.fit(docs_train, y_train)\n y_pred = best_classi.predict(docs_test)\n results = {\"predicted\":y_pred, \"actual\":y_test,\n \"test_vectors\":docs_test,\n 'test_excerpts': [excerpts[idx] for idx in list(docs_test.index)],\n \"classifier\":best_classi,\n \"vectorizer\": best_vectorizer, \"vectors\":best_vecs}\n return results\n\nif __name__ == '__main__':\n results = find_best_classifier()\n","repo_name":"anjapago/AnalyzeAccountability","sub_path":"notebooks/classifiers/binary_classifier.py","file_name":"binary_classifier.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"38452554007","text":"def countdown(n):\n print('Counting down from %d' % n)\n while n > 0:\n yield n\n n -= 1\n return\n\nc = countdown(5)\nprint(next(c))\nfor n in c:\n print(n)\n\nc.close()\nnext(c)","repo_name":"anderscui/Programming","sub_path":"py/fn/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"602034669","text":"from pathlib import Path\nimport parser_r as pr\nimport parser_w as pw\nimport json\nimport docManager\nimport folderManager\nimport shutil\n\ndef get_ocr(name,batch_name):\n\tpath = Path.cwd()/\"tmp\"/batch_name/name/\"regions\"/\"regions.JSON\"\n\twith open(path,\"rb\") as file:\n\t\tdata = json.load(file)\n\t\treturn data\n\n\ndef get_hocr(name,page):\n\tlines = []\n\tareas = pr.get_areas(page)\n\tfor area in areas:\n\t\tline = pr.get_lines(page,area['id'])\n\t\tfor element in line:\n\t\t\tlines.append(element)\n\treturn lines\n\n\ndef compare(name,page,batch_name):\n\taux = get_ocr(name,batch_name)\n\td1 = get_hocr(name,page)\n\tnew_d1 = []\n\n\timage = str(page.stem) + \".tiff\"\n\n\tfor page in 
aux:\n\t\tif page['image'] == image:\n\t\t\td2 = page['regions']\n\n\tfor i in d1:\n\t\tfor h in d2:\n\t\t\tif i['bbox'] == h['bbox'] and (i['word_conf'] != h['word_conf']):\n\t\t\t\t\n\t\t\t\tbest = [max(value) for value in zip(i['word_conf'],h['word_conf'])]\n\t\t\t\tfor r in range(len(best)):\n\t\t\t\t\tif best[r] == h['word_conf'][r]:\n\t\t\t\t\t\ti['text'][r] = h['text'][r] \n\t\t\t\t\t\ti['word_conf'][r] = h['word_conf'][r]\n\n\t\t\t\tnew_d1.append(i)\n\n\t\t\telif i['bbox'] == h['bbox'] and (i['word_conf'] == h['word_conf']):\n\n\t\t\t\tnew_d1.append(i)\n\n\treturn new_d1\n\n\ndef modify(name,tmp,batch_name):\n\tpath = Path.cwd()/\"tmp\"/batch_name/name/\"pages\"\n\tfor page in path.glob(\"*.hocr\"):\n\t\tchanges = compare(name,page,batch_name)\n\t\tpw.change_hocr(page,changes)\n\n\t\tif not tmp:\n\t\t\tdocManager.delete_regions(batch_name,name)\n\n\n\t\t\t\t\t\n\n\n\n\n","repo_name":"joaovizoso/arms_docker","sub_path":"workflow/exec/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"10904013749","text":"# encoding: utf-8\r\n# __author:\"xiaoyaoyin\"\r\n\r\n# import the required packages\r\n# import pandas as pd\r\nimport numpy as np\r\nimport random\r\nimport time\r\nimport argparse\r\n\r\n\r\n# calculate the GC content of given sequence\r\ndef calcu_CG(seq):\r\n\t# seq = seq.upper()\r\n\tcount_a = seq.count('A')\r\n\tcount_t = seq.count('T')\r\n\tcount_c = seq.count('C')\r\n\tcount_g = seq.count('G')\r\n\tgc_content = (count_g + count_c) / (count_a + count_c + count_g + count_t)\r\n\treturn gc_content\r\n\r\n\r\n# get the reverse of a sequence\r\ndef reverse(seq):\r\n\tnew_seq = list(reversed(seq))\r\n\treturn \"\".join(new_seq)\r\n\r\n\r\n# get the complement of a sequence\r\ndef complement(seq):\r\n\tnew_seq = []\r\n\tfor i in seq:\r\n\t\tif i=='A':\r\n\t\t\tnew_seq.append('T')\r\n\t\telif i=='T':\r\n\t\t\tnew_seq.append('A')\r\n\t\telif i=='C':\r\n\t\t\tnew_seq.append('G')\r\n\t\telif i=='G':\r\n\t\t\tnew_seq.append('C')\r\n\t\telse:\r\n\t\t\tnew_seq.append('N')\r\n\treturn \"\".join(new_seq)\r\n\r\n\r\n# calculate distance of two seqs\r\ndef calcu_distance(p, q):\r\n\tmaxn=max(len(p),len(q))\r\n\tminn=min(len(p),len(q))\r\n\tl=0;\r\n\tif len(p)==maxn:\r\n\t\tfor k in range(maxn+minn-1):\r\n\t\t\tif k<minn:\r\n\t\t\t\t# leading overlap: q overhangs the left edge of p\r\n\t\t\t\ts = 0\r\n\t\t\t\tp1 = p[0:k]\r\n\t\t\t\tq1 = q[minn-k:minn]\r\n\t\t\t\tfor j in range(k):\r\n\t\t\t\t\tif p1[j]==q1[j]:\r\n\t\t\t\t\t\ts += 1\r\n\t\t\t\tl = max(l, s)\r\n\t\t\telif (k>=minn) & (k<=maxn):\r\n\t\t\t\ts = 0\r\n\t\t\t\tp1 = p[k-minn:k]\r\n\t\t\t\tq1 = q[0:minn]\r\n\t\t\t\tfor j in range(minn):\r\n\t\t\t\t\tif p1[j]==q1[j]:\r\n\t\t\t\t\t\ts += 1\r\n\t\t\t\tl = max(l, s)\r\n\t\t\telse:\r\n\t\t\t\ts = 0\r\n\t\t\t\tp1 = p[k-minn:maxn]\r\n\t\t\t\tq1 = q[0:maxn+minn-k]\r\n\t\t\t\tfor j in range(maxn+minn-k):\r\n\t\t\t\t\tif p1[j]==q1[j]:\r\n\t\t\t\t\t\ts += 1\r\n\t\t\t\tl = max(l, s)\r\n\telse:\r\n\t\tfor k in range(maxn+minn-1):\r\n\t\t\tif k<minn:\r\n\t\t\t\t# leading overlap: p overhangs the left edge of q\r\n\t\t\t\ts = 0\r\n\t\t\t\tq1 = q[0:k]\r\n\t\t\t\tp1 = p[minn-k:minn]\r\n\t\t\t\tfor j in range(k):\r\n\t\t\t\t\tif p1[j]==q1[j]:\r\n\t\t\t\t\t\ts += 1\r\n\t\t\t\tl = max(l, s)\r\n\t\t\telif (k>=minn) & (k<=maxn):\r\n\t\t\t\ts = 0\r\n\t\t\t\tq1 = q[k-minn:k]\r\n\t\t\t\tp1 = p[0:minn]\r\n\t\t\t\tfor j in range(minn):\r\n\t\t\t\t\tif p1[j]==q1[j]:\r\n\t\t\t\t\t\ts += 1\r\n\t\t\t\tl = max(l, s)\r\n\t\t\telse:\r\n\t\t\t\ts = 0\r\n\t\t\t\tq1 = q[k-minn:maxn]\r\n\t\t\t\tp1 = p[0:maxn+minn-k]\r\n\t\t\t\tfor j in range(maxn+minn-k):\r\n\t\t\t\t\tif p1[j]==q1[j]:\r\n\t\t\t\t\t\ts += 1\r\n\t\t\t\tl = max(l, s)\r\n\treturn l\r\n\r\n\r\n# find if subsequence of length 5 exists\r\ndef subsequence(N):\r\n\tn = len(N)\r\n\tx = reverse(N)\r\n\ty = complement(x)\r\n\tm = 0\r\n\tfor i in range(n-5):\r\n\t\tfor j in range(n-5):\r\n\t\t\tmatch_res = [(x[i+k]==y[j+k]) for k in 
range(5)]\r\n\t\t\tif sum(match_res)!=5:\r\n\t\t\t\tm += 1\r\n\tif m==(n-5)**2:\r\n\t\tp1 = 1\r\n\telse:\r\n\t\tp1 = 0\r\n\treturn p1\r\n\r\n\r\n# convert any decimal number to base x, x should be smaller than 16\r\ndef decimal_to_x(n, x):\r\n\ta = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'A', 'B', 'C', 'D', 'E', 'F']\r\n\tb = []\r\n\twhile True:\r\n\t\ts = n // x \r\n\t\ty = n % x\r\n\t\tb = b + [y]\r\n\t\tif s==0:\r\n\t\t\tbreak\r\n\t\tn = s\r\n\tb.reverse()\r\n\tdd = [str(i) for i in b]\r\n\treturn \"\".join(dd)\r\n\r\n\r\n# design sticky ends sequences given length and number\r\ndef Seq_Design(lengthsticky, numsticky, param1, param3, param4):\r\n\tN0 = []\r\n\tM = 4\r\n\td = [i for i in range(M**lengthsticky)]\r\n\tNN = [decimal_to_x(i, M).zfill(lengthsticky) for i in d]\r\n\tfor i in range(len(NN)):\r\n\t\ttemp_list = []\r\n\t\tfor j in NN[i]:\r\n\t\t\tif j=='0':\r\n\t\t\t\ttemp_list.append('A')\r\n\t\t\telif j=='1':\r\n\t\t\t\ttemp_list.append('T')\r\n\t\t\telif j=='2':\r\n\t\t\t\ttemp_list.append('C')\r\n\t\t\telse:\r\n\t\t\t\ttemp_list.append('G')\r\n\t\tNN[i] = \"\".join(temp_list)\r\n\tfor i in range(len(NN)):\r\n\t\tN = NN[i]\r\n\t\t# if the max length of single word repeats is larger than param1, kick it\r\n\t\tN = Kick_RepeatOne(N,param1)\r\n\t\t# if the max length of consecutive sequence is larger than param4, kick it\r\n\t\tN = Kick_RepeatSeq(N,param4)\r\n\t\t# if the CG ratio isn't between 0.4 to 0.6, kick it\r\n\t\tif len(N)>0:\r\n\t\t\tGC_percent = calcu_CG(N)\r\n\t\t\tif (GC_percent<=0.4) | (GC_percent>=0.6):\r\n\t\t\t\tN = []\r\n\t\t# compare this new sequence with those we already have, if their distance ratio is closer than q, kick it\r\n\t\tif len(N)>0:\r\n\t\t\tk = 0\r\n\t\t\tfor j in range(len(N0)):\r\n\t\t\t\tk += (calcu_distance(N, N0[j])>=param3*lengthsticky)\r\n\t\t\tif k>0:\r\n\t\t\t\tN = []\r\n\t\t# compare this new sequence with itself to prevent hairpin\r\n\t\tif len(N)>0:\r\n\t\t\tif subsequence(N)==0:\r\n\t\t\t\tN = []\r\n\t\tif len(N)>0:\r\n\t\t\tN0.append(\"\".join(N))\r\n\treturn N0\r\n\r\n\r\n\r\n# KICKREPEAT gives a set of seq without repeat ones\r\ndef Kick_RepeatOne(N, k):\r\n\ttemp = 0\r\n\tfor i in range(len(N)-k):\r\n\t\tif calcu_distance(N[i:i+k], N[(i+1):len(N)])==k:\r\n\t\t\ttemp += 1\r\n\tif temp>0:\r\n\t\tM = []\r\n\telse:\r\n\t\tM = N\r\n\treturn M\r\n\r\n\r\n# KICKREPEAT gives a set of seq without repeat ones.\r\ndef Kick_RepeatSeq(N, k):\r\n\t#N the former set, pp the later set, k the repeat num,at least 2\r\n\tpp = []\r\n\tq1 = [0 for i in range(k-1)]\r\n\tfor i in range(len(N)):\r\n\t\ttemp = 0\r\n\t\tfor j in range(len(N[i])-k+1):\r\n\t\t\tfor m in range(k-1):\r\n\t\t\t\tq1[m] = (N[i][j]==N[i][j+m])\r\n\t\t\tif min(q1)==1:\r\n\t\t\t\ttemp += 1\r\n\t\tif temp==0:\r\n\t\t\tpp.append(N[i])\r\n\treturn pp\r\n\r\n\r\n# main\r\nif __name__ == '__main__': \r\n\tstart_time = time.time()\r\n\tparser = argparse.ArgumentParser(description='manual to this script')\r\n\tparser.add_argument(\"--lengthsticky\", type=int, default=6)\r\n\tparser.add_argument(\"--numsticky\", type=int, default=1)\r\n\tparser.add_argument(\"--single_word_repeats\", type=int, default=3)\r\n\t# parse.add_argument(\"--max_overlap\", type=int, default=5)\r\n\tparser.add_argument(\"--max_ratio\", type=float, default=0.6) # 1-0.4\r\n\tparser.add_argument(\"--max_repeats\", type=int, default=5)\r\n\targs = parser.parse_args()\r\n\tlengthsticky = args.lengthsticky\r\n\tnumsticky = args.numsticky\r\n\tparam1 = args.single_word_repeats # the max length of single word repeats\r\n\t# param2 
= args.max_overlap # the total length of overlap between two frame sequences like a,b,c,d,e\r\n\tparam3 = args.max_ratio # the max ratio of overlap between two sticky sequences\r\n\tparam4 = args.max_repeats # the max length of repeats\r\n\tS0 = Seq_Design(lengthsticky, numsticky, param1, param3, param4)\r\n\tR1 = [reverse(i) for i in S0]\r\n\tS1 = [complement(i) for i in R1]\r\n\tprint('sticky ends designed')\r\n\tfor i in range(len(S0)):\r\n\t\tprint(S0[i])\r\n\tend_time = time.time()\r\n\tprint('time used ',end_time-start_time)\r\n","repo_name":"yinxy1992/DNA_computing","sub_path":"sticky_ends_design.py","file_name":"sticky_ends_design.py","file_ext":"py","file_size_in_byte":5907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12619677007","text":"import sys\nsys.path.insert(0, '..')\nimport argparse\nimport os \nimport pickle\nfrom tqdm import tqdm\nimport pandas as pd\nfrom subprocess import call\nimport random\n\nTOTAL_SENTENCES = 40418261\n\nparser = argparse.ArgumentParser(description='Preprocess program for librispeech-lm-norm.txt')\nparser.add_argument('--data_path', type=str, help='Path to LibriSpeech\\'s extra text.')\nparser.add_argument('--output_path',type=str, help='Path to store output csv file')\n#parser.add_argument('--bpe_source', type=str, help='Path to pretrained BPE.')\nparser.add_argument('--target', default='subword', type=str, help='Learning target ( phoneme / char / subword / word )', required=False)\nparser.add_argument('--n_tokens', default=5000, type=int, help='Vocabulary size of target', required=False)\nparser.add_argument('--n_samples', default=320000, type=int, help='Sample a part of txt as training data.', required=False)\nparas = parser.parse_args()\n\n\n# Encode data\nwith open(os.path.join(paras.output_path,\"mapping.pkl\"), \"rb\") as fp:\n word_dic = pickle.load(fp)\nbpe_dir = os.path.join(paras.output_path,\"bpe\")\n\ncall(['spm_encode',\n '--model='+os.path.join(bpe_dir,'bpe.model'),\n '--output_format=piece'\n ],stdin=open(paras.data_path,'r'),\n stdout=open(os.path.join(bpe_dir,'encode','extra.txt'),'w'))\ny = []\n\ni = 0\nfreq = TOTAL_SENTENCES//paras.n_samples\nwith open(os.path.join(bpe_dir,'encode','extra.txt'),'r') as f:\n for line in f:\n i += 1\n if i%freq == 0:\n tmp = ['0']+[str(word_dic[c]) for c in line[:-1].split(' ')]+['1']\n y.append('_'.join(tmp))\ndf = pd.DataFrame(data={'file_path':['None' for f in y],'length':[0 for f in y],'label':y})\ndf.to_csv(os.path.join(paras.output_path,'extra.csv'))\n","repo_name":"Eman22S/Amharic-Seq2Seq","sub_path":"data/preprocess_libri_extra_text.py","file_name":"preprocess_libri_extra_text.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"32407852132","text":"import pandas as pd\n\n# Create Spark session\nspark_active = False\nif spark_active:\n from pyspark.sql import SparkSession\n from pyspark.sql.functions import *\n from pyspark.sql.functions import lit\n\n spark = SparkSession.builder \\\n .master(\"local\") \\\n .config(\"spark.sql.autoBroadcastJoinThreshold\", -1) \\\n .config(\"spark.executor.memory\", \"500mb\") \\\n .appName(\"Test01\") \\\n .getOrCreate()\n\n\ndef get_main_spark_df():\n \n main_path = 'source/dataset/'\n \n #setting up dataframe \n df_train_table = spark.read.options(header='True', inferSchema='True').csv(main_path + 'train_set.csv')\n df_train_table = df_train_table.withColumn('path', lit(main_path + 
'train_set/'))\n    \n    df_test_table = spark.read.options(header='True', inferSchema='True').csv(main_path + 'test_set.csv')\n    df_test_table = df_test_table.withColumn('path', lit(main_path + 'test_set/'))\n    \n    df = df_train_table.union(df_test_table)\n\n    df = df.drop('master_tree')\n\n    return df\n\n\ndef get_main_df():\n    \n    #setting up dataframe \n    main_path = 'source/dataset/'\n    df_train_table = pd.read_csv(main_path + 'train_set.csv')\n    df_train_table.index = df_train_table.pk_id\n    df_train_table['path'] = main_path + 'train_set/'\n    del df_train_table['pk_id']\n\n    df_test_table = pd.read_csv(main_path + 'test_set.csv')\n    df_test_table.index = df_test_table.pk_id\n    df_test_table['path'] = main_path + 'test_set/'\n    del df_test_table['pk_id']\n\n    df = df_train_table.append(df_test_table, sort=False)\n\n    del df['master_tree']\n    del df['file_type']\n    del df['category']\n\n    return df\n\n\n\nclass ModelTemplate():\n    def __init__(self, name, xvalid, xtrain, classifier):\n        self.xvalid_value = xvalid\n        self.xtrain_value = xtrain\n        self.name_value = name\n        self.classifier = classifier","repo_name":"nbuzzano/nlp-text-classification","sub_path":"dags/features_utils.py","file_name":"features_utils.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"36435333766","text":"import logging\nimport sqlite3\n\n\ndef start_initial_db_config():\n    conn = sqlite3.connect(\"9gag.db\")\n    cursor = conn.cursor()\n\n    # get the count of tables with the name\n    cursor.execute(\n        \"\"\"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='imgs_src'\"\"\"\n    )\n\n    # if the count is 1, then table exists\n    if not cursor.fetchone()[0] == 1:\n        cursor.execute(\n            \"\"\"CREATE TABLE imgs_src (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, src TEXT NOT NULL)\"\"\"\n        )\n\n        logging.info(\"Successfully created table.\")\n\n        # commit so the new table is persisted before the connection closes\n        conn.commit()\n        conn.close()\n\n        return\n\n    conn.commit()\n    conn.close()\n","repo_name":"mdcg/9gag-rpa","sub_path":"src/bot/db/initial_config.py","file_name":"initial_config.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"30120472686","text":"#!/usr/bin/env python\r\n# -*- coding=utf-8 -*-\r\n# filename new_beiwanglu.py\r\n# author MaxD\r\n\r\nimport string\r\nimport random\r\nfrom color_me import ColorMe\r\n\r\nzong_list = []\r\n\r\n\r\ndef Add_list():\r\n\r\n    str = input('请输入一段文字,包含人物,时间,事件.每个元素间通过\",\"分割:')\r\n\r\n    str_list = str.split(',')\r\n    Man = str_list[0]\r\n    Time = str_list[1]\r\n    Thing = str_list[2]\r\n    \r\n    Thing_dict = {\r\n        '人物':Man,\r\n        '时间':Time,\r\n        '待办事项':Thing\r\n    }\r\n\r\n    zong_list.append(Thing_dict)\r\n    return()\r\n\r\ndef Find_list():\r\n    if zong_list:\r\n        Find_con = input('请输入查询内容(例如 1.30或10:28或开会 ): ')\r\n        for list_num in range(len(zong_list)):\r\n            # for Find_con in zong_list[list_num]:\r\n            if Find_con in zong_list[list_num].values():\r\n                # print (zong_list[list_num].items())\r\n                for k,v in zong_list[list_num].items():\r\n                    print (f'{ColorMe(k).blue()}:{ColorMe(v).red()}')\r\n                print()\r\n    else:\r\n        print('请增加备忘录内容')\r\n\r\ndef Del_list():\r\n    if len(zong_list) != 0:\r\n\r\n        Del_con = input('可根据条目内容删除某个事件(例如 输入10:30则将删除该时间点的一条记录):')\r\n        for list_num in range(len(zong_list)):\r\n            if Del_con in zong_list[list_num].values():\r\n                print(zong_list[list_num])\r\n                Del_yn = input('是否删除这条备忘录?(y/n)')\r\n                if Del_yn == 'y':\r\n                    del zong_list[list_num]\r\n                    break\r\n                elif Del_yn == 'n':\r\n                    continue\r\n    elif len(zong_list) == 0:\r\n        f = ColorMe('目前备忘录已被清空').red()\r\n        print (f'\\n {f} \\n')\r\n\r\ndef 
Look_list():\n if len(zong_list) != 0:\n for i in range(len(zong_list)):\n for k,v in zong_list[i].items():\n print (f'{ColorMe(k).blue()}:{ColorMe(v).red()}')\n print ('_'*30)\n elif len(zong_list) == 0:\n f = ColorMe('目前备忘录没内容').blue()\n print(f'\\n {f} \\n')\n\nwhile True:\n choose = input('''\n 请选择如下几种操作:\n \\n\n A 增加备忘录内容\n F 查找备忘录内容\n D 删除备忘录内容\n L 列出所有内容\n Q 退出\n \\n\n ''')\n\n if choose == 'A':\n Add_list()\n\n if choose == 'F':\n Find_list()\n\n if choose == 'D':\n Del_list()\n\n if choose == 'L':\n Look_list()\n\n if choose == 'Q':\n print('Bye')\n exit(0)\n if choose not in ['A','F','D','Q','L']:\n print('Are you kidding me???')\n print()","repo_name":"storyskya/python_student","sub_path":"new_beiwanglu.py","file_name":"new_beiwanglu.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17080407170","text":"from argparse import ArgumentParser\nfrom datetime import datetime\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\n\nfrom transformers import BertModel, BertTokenizer, get_linear_schedule_with_warmup\n\nfrom datasets import TweetDataset, create_minibatch\nfrom models import BertClassifier\nfrom utils import timer\n\n\ndef set_seed(seed):\n torch.manual_seed(seed)\n\n\ndef read_data(data_dir: Path):\n train_df = pd.read_csv(data_dir / \"train.csv\")\n train_data = [(text, label) for text, label in zip(train_df[\"text\"], train_df[\"target\"])]\n train_data, val_data = train_test_split(train_data)\n test_df = pd.read_csv(data_dir / \"test.csv\")\n return train_data, val_data, (test_df[\"id\"], test_df[\"text\"])\n\n\ndef batch_to_cuda(ipt):\n return {k: v.cuda() for k, v in ipt.items()}\n\n\ndef train(args, model, train_dataloader, val_dataloader):\n criterion = nn.BCELoss()\n optimizer = torch.optim.Adam(\n [\n {\"params\": model.bert_model.parameters(), \"lr\": 1e-5},\n {\"params\": model.classifier.parameters()},\n ],\n lr=args.lr,\n weight_decay=args.weight_decay,\n )\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.train_warmup_steps,\n num_training_steps=len(train_dataloader) * args.train_epochs,\n )\n\n @timer\n def run_epoch(dataloader, train=False):\n model.train() if train else model.eval()\n\n loss_history, accu_history = [], []\n with torch.enable_grad() if train else torch.no_grad():\n for x, y in tqdm(\n dataloader, desc=\"Train |\" if train else \" Val |\", ncols=120, leave=False\n ):\n if train:\n optimizer.zero_grad()\n\n y_hat = model(batch_to_cuda(x)).cpu()\n loss = criterion(y_hat, y)\n\n if train:\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n loss_history.append(loss.detach())\n accu_history.append(np.mean((torch.round(y_hat) == y).tolist()))\n return np.mean(loss_history), np.mean(accu_history)\n\n if args.freeze_bert:\n print(\"=== Freeze Bert Weights ===\")\n for param in model.bert_model.parameters():\n param.requires_grad = False\n\n for epoch in range(1, args.train_epochs + 1):\n print(f\"Epoch {epoch:2d} / {args.train_epochs}\")\n\n train_time, (train_loss, train_accu) = run_epoch(train_dataloader, train=True)\n print(f\" Train [{train_time:6.3f}] | loss: {train_loss:.4f} ; accu {train_accu:.4f}\")\n\n val_time, (val_loss, val_accu) = run_epoch(val_dataloader, train=False)\n print(f\" Val [{val_time:6.3f}] | loss: 
{val_loss:.4f} ; accu {val_accu:.4f}\")\n\n if epoch == args.unfreeze_bert_after:\n print(\"=== Unfreeze Bert Weights ===\")\n for param in model.bert_model.parameters():\n param.requires_grad = True\n\n if epoch % args.checkpoint_every == 0:\n torch.save(\n {\n \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"scheduler_state_dict\": scheduler.state_dict(),\n },\n args.output_dir / f\"checkpoints_{epoch:02d}.pt\",\n )\n\n\ndef predict(args, model, test_dataloader):\n predictions = []\n\n model.eval()\n with torch.no_grad():\n for x in tqdm(test_dataloader, desc=\"Predicting\", ncols=120, leave=False):\n y_hat = model(batch_to_cuda(x)).cpu()\n predictions.append(round(y_hat.item()))\n\n return predictions\n\n\ndef main(args):\n set_seed(args.seed)\n\n args.output_dir.mkdir(exist_ok=True, parents=True)\n\n train_data, val_data, (test_ids, test_data) = read_data(args.data_dir)\n\n bert_model = BertModel.from_pretrained(\"bert-base-uncased\", return_dict=True)\n bert_tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n\n train_dataloader = DataLoader(\n TweetDataset(train_data, bert_tokenizer),\n batch_size=args.train_batch_size,\n shuffle=True,\n num_workers=args.num_workers,\n collate_fn=create_minibatch,\n )\n val_dataloader = DataLoader(\n TweetDataset(val_data, bert_tokenizer),\n batch_size=args.train_batch_size,\n num_workers=args.num_workers,\n collate_fn=create_minibatch,\n )\n test_dataloader = DataLoader(\n TweetDataset(test_data, bert_tokenizer, has_label=False),\n collate_fn=create_minibatch,\n )\n\n model = BertClassifier(bert_model, 1).cuda()\n\n if args.do_train:\n train(args, model, train_dataloader, val_dataloader)\n torch.save(model.state_dict(), args.output_dir / \"model_weights.pt\")\n\n if args.do_predict:\n ckpt = torch.load(args.weights_path)\n if \"model_state_dict\" in ckpt:\n model.load_state_dict(ckpt[\"model_state_dict\"])\n else:\n model.load_state_dict(ckpt)\n\n predictions = predict(args, model, test_dataloader)\n\n with open(args.output_dir / \"prediction.csv\", \"w\") as f:\n print(\"id,target\", file=f)\n for i, label in zip(test_ids, predictions):\n print(f\"{i},{label}\", file=f)\n\n\ndef parse_arguments():\n parser = ArgumentParser()\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--do_predict\", action=\"store_true\")\n\n parser.add_argument(\"--lr\", type=float, default=5e-4)\n parser.add_argument(\"--weight_decay\", type=float, default=0)\n\n parser.add_argument(\"--freeze_bert\", action=\"store_true\")\n parser.add_argument(\"--unfreeze_bert_after\", type=int, default=15)\n\n parser.add_argument(\"--train_epochs\", type=int, default=25)\n parser.add_argument(\"--train_batch_size\", type=int, default=32)\n parser.add_argument(\"--train_warmup_steps\", type=int, default=200)\n parser.add_argument(\"--predict_batch_size\", type=int, default=1)\n\n parser.add_argument(\"--checkpoint_every\", type=int, default=5)\n\n parser.add_argument(\"--data_dir\", type=Path, default=\"data/\")\n parser.add_argument(\"--output_dir\", type=Path)\n parser.add_argument(\"--weights_path\", type=Path)\n\n parser.add_argument(\"--seed\", type=int, default=0x06902029)\n parser.add_argument(\"--num_workers\", type=int, default=8)\n\n args = parser.parse_args()\n\n if not args.weights_path:\n if not args.output_dir:\n args.output_dir = Path(f\"outputs/{datetime.now().strftime('%m%d-%H%M%S')}\")\n args.weights_path = args.output_dir / \"model_weights.pt\"\n else:\n args.output_dir = 
args.weights_path.parent\n\n return args\n\n\nif __name__ == \"__main__\":\n main(parse_arguments())\n","repo_name":"jimpei8989/NLP-Disaster-Tweets","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9570440776","text":"import json\nimport math as m\nfrom decimal import Decimal\n\nfrom django.db import models\nfrom django.utils import timezone, translation\nfrom djmoney.models.fields import MoneyField\nfrom djmoney.money import Money\nfrom moneyed.localization import format_money\n\nfrom goosetools.core.models import System\nfrom goosetools.notifications.notification_types import NOTIFICATION_TYPES\nfrom goosetools.ownership.models import TransferLog\nfrom goosetools.users.models import Character, GooseUser\n\n\nclass JSONEncoderWithMoneyAndDecimalSupport(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, Money):\n return format_money(o, locale=translation.get_language())\n if isinstance(o, Decimal):\n return str(m.floor(o))\n # Let the base class default method raise the TypeError\n return json.JSONEncoder.default(self, o)\n\n\nclass Contract(models.Model):\n from_user = models.ForeignKey(\n GooseUser, on_delete=models.CASCADE, related_name=\"my_contracts\"\n )\n to_char = models.ForeignKey(Character, on_delete=models.CASCADE)\n system = models.ForeignKey(System, on_delete=models.CASCADE)\n transfer = models.ForeignKey(\n TransferLog, on_delete=models.CASCADE, null=True, blank=True\n )\n created = models.DateTimeField()\n status = models.TextField(\n choices=[\n (\n \"requested\",\n \"requested\",\n ),\n # A requested contract is the system asking from_user to make a contract to to_char\n (\"pending\", \"pending\"),\n (\"rejected\", \"rejected\"),\n (\"accepted\", \"accepted\"),\n (\"cancelled\", \"cancelled\"),\n ]\n )\n log = models.JSONField(null=True, blank=True) # type: ignore\n logged_quantity = models.IntegerField(default=0)\n isk = MoneyField(\n max_digits=20, decimal_places=2, default_currency=\"EEI\", default=0\n ) # If positive then from_user is sending to_char isk, Negative then from_user will be recieving isk from to_char\n\n def change_status(self, new_status):\n if new_status != \"pending\":\n if self.status == \"requested\":\n NOTIFICATION_TYPES[\"contract_requested\"].dismiss_one(self.from_user)\n else:\n NOTIFICATION_TYPES[\"contract_made\"].dismiss_one(self.to_char.user)\n self.status = new_status\n\n def isk_display(self):\n\n # pylint: disable=no-member\n isk_amount = self.isk.amount\n if isk_amount == 0:\n return \"0\"\n elif isk_amount > 0:\n return (\n f\"{self.from_user} pays {self.to_char}({self.to_char.user}) {self.isk}\"\n )\n else:\n return (\n f\"{self.to_char}({self.to_char.user}) pays {self.from_user} {-self.isk}\"\n )\n\n @staticmethod\n def create(from_user, to_char, system, status, isk=0, transfer=None):\n contract = Contract(\n from_user=from_user,\n to_char=to_char,\n system=system,\n created=timezone.now(),\n status=status,\n isk=isk,\n transfer=transfer,\n )\n if status == \"requested\":\n NOTIFICATION_TYPES[\"contract_requested\"].send(from_user)\n else:\n NOTIFICATION_TYPES[\"contract_made\"].send(to_char.user)\n contract.full_clean()\n contract.save()\n return contract\n\n def save_items_to_log(self, clear_items=True):\n log = []\n for item in self.inventoryitem_set.all():\n log.append(\n {\n \"id\": item.id,\n \"item\": str(item),\n \"quantity\": item.quantity,\n \"status\": 
item.status(),\n \"loot_group_id\": item.loot_group and item.loot_group.id,\n }\n )\n self.log = json.dumps(log, cls=JSONEncoderWithMoneyAndDecimalSupport)\n self.logged_quantity = self.inventoryitem_set.count()\n self.full_clean()\n self.save()\n if clear_items:\n self.inventoryitem_set.update(contract=None)\n\n def can_change_status_to(self, user, new_status):\n is_recieving_user = self.to_char.user == user\n is_sending_user = self.from_user == user\n if self.status == \"requested\":\n if new_status == \"cancelled\":\n return is_recieving_user\n elif new_status in (\"rejected\", \"pending\"):\n return is_sending_user\n else:\n return False\n elif self.status == \"pending\":\n return (\n is_recieving_user\n and new_status in [\"accepted\", \"rejected\"]\n or is_sending_user\n and new_status in [\"cancelled\"]\n )\n else:\n return False\n\n def total_items(self):\n return self.inventoryitem_set.count() + self.logged_quantity\n","repo_name":"nigel-gott/spacepaperwork","sub_path":"goosetools/contracts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"32017665037","text":"import datetime\nimport logging\n\nfrom .init_base import cursor, con\n\n\ndef add_user(telegram_id: int, username: str) -> bool:\n try:\n cursor.execute(\"INSERT INTO users \"\n \"(telegram_id, username) \"\n \"VALUES (?, ?)\",\n (telegram_id, username))\n con.commit()\n return True\n except Exception as ex:\n logging.error(f'Error add user in base {ex}')\n return False\n\n\ndef get_stats_by_telegram_id(telegram_id: int | str) -> tuple | None:\n cursor.execute(\"SELECT words_total, words_translated, delay FROM users WHERE telegram_id = ?\", (telegram_id,))\n return cursor.fetchone()\n\n\ndef get_stats_by_username(username: str) -> tuple | None:\n cursor.execute(\"SELECT words_total, words_translated, delay FROM users WHERE username = ?\", (username,))\n return cursor.fetchone()\n\n\ndef get_current_delay(telegram_id: int) -> int | None:\n cursor.execute(\"SELECT delay FROM users WHERE telegram_id = ?\", (telegram_id,))\n cur_delay = cursor.fetchone()\n if cur_delay:\n return cur_delay[0]\n else:\n return None\n\n\ndef change_distrib(telegram_id: int) -> tuple:\n try:\n cursor.execute(\"SELECT distribution FROM users WHERE telegram_id = ?\", (telegram_id,))\n distrib = cursor.fetchone()\n if distrib:\n distrib: bool = distrib[0]\n cursor.execute(f\"UPDATE users SET distribution = ? WHERE telegram_id = ?\",\n (not distrib, telegram_id))\n if not distrib:\n update_last_distrib(telegram_id, datetime.datetime.now().timestamp())\n con.commit()\n return True, not distrib\n else:\n return False, False\n except Exception as ex:\n logging.error(f'Error change distrib: {ex}, {telegram_id}')\n return False, False\n\n\ndef get_users_distribution() -> list | None:\n try:\n cursor.execute(\"SELECT telegram_id, delay, distribution, last_distrib FROM users\")\n users = cursor.fetchall()\n if len(users):\n return users\n else:\n return None\n except Exception as ex:\n logging.error(f'Error get all users distrib: {ex}')\n return None\n\n\ndef set_new_delay(telegram_id: int, new_delay: int) -> bool:\n try:\n cursor.execute(\"UPDATE users SET delay = ? 
WHERE telegram_id = ?", (new_delay, telegram_id))\n        con.commit()\n        return True\n    except Exception as ex:\n        logging.error(f'Error update delay: {ex}, {telegram_id}, {new_delay}')\n        return False\n\n\ndef update_last_distrib(telegram_id: int, new_distrib: datetime.datetime) -> None:\n    try:\n        cursor.execute(\"UPDATE users SET last_distrib = ? WHERE telegram_id = ?\", (new_distrib, telegram_id))\n        con.commit()\n    except Exception as ex:\n        logging.error(f'Error update last distrib in base: {ex}, {telegram_id}')\n","repo_name":"bridges123/eng_learn_bot","sub_path":"db/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"21530675738","text":"\"\"\"\nViews for magnolia tree blog django project.\n\"\"\"\nfrom django.shortcuts import render, get_object_or_404, reverse\nfrom django.views import generic, View\nfrom django.views.generic.edit import UpdateView, DeleteView, CreateView\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse_lazy\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.text import slugify\nfrom .models import BlogPost, BlogComment\nfrom .forms import CommentForm, CreateBlogPost\n\n\ndef home_page(request):\n    \"\"\"\n    View for home page.\n    \"\"\"\n    return render(request, 'index.html')\n\n\ndef classes_page(request):\n    \"\"\"\n    View for classes page.\n    \"\"\"\n    return render(request, 'classes.html')\n\n\ndef contact_page(request):\n    \"\"\"\n    View for contact page\n    \"\"\"\n    return render(request, 'contact.html')\n\n\nclass BlogPage(generic.ListView):\n    \"\"\"\n    View for blog page. Inspired by \"I think therefore I blog\"\n    from Code Institute\n    \"\"\"\n    model = BlogPost\n    queryset = BlogPost.objects.filter(status=1).order_by('-created_on')\n    template_name = 'blog.html'\n    paginate_by = 7\n\n\nclass BlogPostDetail(View):\n    \"\"\"\n    Detailed view of a blogpost. Inspired by \"I think therefore I blog\"\n    from Code Institute\n    \"\"\"\n    def get(self, request, slug, *args, **kwargs):\n        \"\"\"\n        Creates a URL based on the slug of the Blog post\n        \"\"\"\n        queryset = BlogPost.objects.filter(status=1)\n        post = get_object_or_404(queryset, slug=slug)\n        comments = post.comments.filter(approved=True).order_by('-created_on')\n        liked = False\n        if post.likes.filter(id=self.request.user.id).exists():\n            liked = True\n\n        return render(\n            request,\n            'blog-post.html',\n            {\n                'post': post,\n                'comments': comments,\n                'commented': False,\n                'liked': liked,\n                'comment_form': CommentForm()\n            },\n        )\n\n    def post(self, request, slug, *args, **kwargs):\n        \"\"\"\n        Submit a new comment to a\n        blog post.\n        \"\"\"\n        queryset = BlogPost.objects.filter(status=1)\n        post = get_object_or_404(queryset, slug=slug)\n        comments = post.comments.filter(approved=True).order_by('-created_on')\n        liked = False\n        if post.likes.filter(id=self.request.user.id).exists():\n            liked = True\n\n        comment_form = CommentForm(data=request.POST)\n\n        if comment_form.is_valid():\n            comment_form.instance.email = request.user.email\n            comment_form.instance.name = request.user\n            comment = comment_form.save(commit=False)\n            comment.post = post\n            comment.save()\n            messages.success(\n                request,\n                \"Your comment will be reviewed before it's published. 
\"\n )\n else:\n comment_form = CommentForm()\n\n return render(\n request,\n 'blog-post.html',\n {\n 'post': post,\n 'comments': comments,\n 'commented': True,\n 'liked': liked,\n 'comment_form': CommentForm()\n },\n )\n\n\nclass LikeBlogPost(View):\n \"\"\"\n Class that makes a user be able to like a blogpost.\n Borrowed from \"I think therefore I am\" project from \n Code institute.\n \"\"\"\n def post(self, request, slug):\n \"\"\"\n Makes sure there is authenticated user\n and allows a like/unlike.\n \"\"\"\n post = get_object_or_404(BlogPost, slug=slug)\n\n if post.likes.filter(id=request.user.id).exists():\n post.likes.remove(request.user)\n else:\n post.likes.add(request.user)\n\n return HttpResponseRedirect(reverse('blog_post', args=[slug]))\n\n\nclass UpdateComment(SuccessMessageMixin, UpdateView):\n \"\"\"\n View for updateing comment\n \"\"\"\n model = BlogComment\n form_class = CommentForm\n context_object_name = 'comment'\n template_name = 'update-comment.html'\n success_message = \"Your comment is updated\"\n\n def form_valid(self, form):\n \"\"\"\n Success url return to blog post\n \"\"\"\n self.success_url = f'/{self.get_object().post.slug}/'\n return super().form_valid(form)\n\n\nclass DeleteComment(DeleteView):\n \"\"\"\n View for deleting comment\n \"\"\"\n model = BlogComment\n context_object_name = 'comment'\n template_name = 'delete-comment.html'\n\n def get_success_url(self, *args):\n \"\"\"\n Success url return to blog post\n \"\"\"\n self.success_url = f'/{self.get_object().post.slug}'\n self.slug = self.get_object().post.slug\n return reverse_lazy('blog_post', args=[self.slug])\n\n\nclass UserBlogPost(SuccessMessageMixin, CreateView):\n \"\"\"\n Form for creating a blog post\n \"\"\"\n model = BlogPost\n form_class = CreateBlogPost\n template_name = 'create-blog-post.html'\n success_url = '../blog/'\n success_message = \"Your blog post is sent for review and will be published if approved\"\n\n def form_valid(self, form):\n \"\"\"\n Validates user when using the form\n \"\"\"\n\n form.instance.author = self.request.user\n form.instance.slug = slugify(form.instance.title)\n return super().form_valid(form)\n","repo_name":"AngelicaGuimaraes/p4_magnolia_tree_blog","sub_path":"magnolia_tree/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35962340306","text":"from discopop_library.discopop_optimizer.classes.edges.ChildEdge import ChildEdge\nfrom discopop_library.discopop_optimizer.classes.edges.OptionEdge import OptionEdge\nfrom discopop_library.discopop_optimizer.classes.edges.RequirementEdge import RequirementEdge\nfrom discopop_library.discopop_optimizer.classes.edges.SuccessorEdge import SuccessorEdge\nfrom discopop_library.discopop_optimizer.classes.edges.TemporaryEdge import TemporaryEdge\nfrom discopop_library.discopop_optimizer.classes.nodes.ContextNode import ContextNode\nfrom discopop_library.discopop_optimizer.classes.nodes.FunctionRoot import FunctionRoot\nfrom discopop_library.discopop_optimizer.classes.nodes.Loop import Loop\nfrom discopop_library.discopop_optimizer.classes.nodes.Workload import Workload\nfrom discopop_library.discopop_optimizer.utilities.MOGUtilities import data_at\n\nimport matplotlib.pyplot as plt # type: ignore\nimport networkx as nx # type: ignore\n\n\ndef show(graph):\n \"\"\"Plots the graph\n\n :return:\n \"\"\"\n fig, ax = plt.subplots()\n try:\n pos = nx.planar_layout(graph) # good\n except 
nx.exception.NetworkXException:\n try:\n # fallback layouts\n pos = nx.shell_layout(graph) # maybe\n except nx.exception.NetworkXException:\n pos = nx.random_layout(graph)\n\n drawn_nodes = set()\n nodes_lists = dict()\n node_ids = dict()\n node_insertion_sequence = []\n # draw nodes\n node_insertion_sequence.append(FunctionRoot)\n nodes_lists[FunctionRoot] = nx.draw_networkx_nodes(\n graph,\n pos=pos,\n ax=ax,\n node_size=200,\n node_color=\"#ff5151\",\n node_shape=\"d\",\n nodelist=[n for n in graph.nodes if isinstance(data_at(graph, n), FunctionRoot)],\n )\n node_ids[FunctionRoot] = [n for n in graph.nodes if isinstance(data_at(graph, n), FunctionRoot)]\n drawn_nodes.update([n for n in graph.nodes if isinstance(data_at(graph, n), FunctionRoot)])\n\n node_insertion_sequence.append(Loop)\n nodes_lists[Loop] = nx.draw_networkx_nodes(\n graph,\n pos=pos,\n ax=ax,\n node_size=200,\n node_color=\"#ff5151\",\n node_shape=\"s\",\n nodelist=[n for n in graph.nodes if isinstance(data_at(graph, n), Loop)],\n )\n node_ids[Loop] = [n for n in graph.nodes if isinstance(data_at(graph, n), Loop)]\n drawn_nodes.update([n for n in graph.nodes if isinstance(data_at(graph, n), Loop)])\n\n node_insertion_sequence.append(ContextNode)\n nodes_lists[ContextNode] = nx.draw_networkx_nodes(\n graph,\n pos=pos,\n ax=ax,\n node_size=200,\n node_color=\"yellow\",\n node_shape=\"s\",\n nodelist=[n for n in graph.nodes if isinstance(data_at(graph, n), ContextNode)],\n )\n node_ids[ContextNode] = [n for n in graph.nodes if isinstance(data_at(graph, n), ContextNode)]\n drawn_nodes.update([n for n in graph.nodes if isinstance(data_at(graph, n), ContextNode)])\n\n node_insertion_sequence.append(Workload)\n nodes_lists[Workload] = nx.draw_networkx_nodes(\n graph,\n pos=pos,\n ax=ax,\n node_size=200,\n node_color=\"#2B85FD\",\n node_shape=\"o\",\n nodelist=[\n n\n for n in graph.nodes\n if isinstance(data_at(graph, n), Workload) and n not in drawn_nodes\n ],\n )\n node_ids[Workload] = [\n n for n in graph.nodes if isinstance(data_at(graph, n), Workload) and n not in drawn_nodes\n ]\n drawn_nodes.update(\n [n for n in graph.nodes if isinstance(data_at(graph, n), Workload) and n not in drawn_nodes]\n )\n\n # id as label\n labels = {}\n for n in graph.nodes:\n labels[n] = graph.nodes[n][\"data\"].get_plot_label()\n nx.draw_networkx_labels(graph, pos, labels, font_size=7, ax=ax)\n\n nx.draw_networkx_edges(\n graph,\n pos,\n ax=ax,\n edge_color=\"black\",\n edgelist=[e for e in graph.edges(data=\"data\") if isinstance(e[2], SuccessorEdge)],\n )\n\n nx.draw_networkx_edges(\n graph,\n pos,\n ax=ax,\n edge_color=\"red\",\n edgelist=[e for e in graph.edges(data=\"data\") if isinstance(e[2], ChildEdge)],\n )\n\n nx.draw_networkx_edges(\n graph,\n pos,\n ax=ax,\n edge_color=\"green\",\n edgelist=[e for e in graph.edges(data=\"data\") if isinstance(e[2], TemporaryEdge)],\n )\n\n nx.draw_networkx_edges(\n graph,\n pos,\n ax=ax,\n edge_color=\"pink\",\n edgelist=[e for e in graph.edges(data=\"data\") if isinstance(e[2], OptionEdge)],\n )\n\n nx.draw_networkx_edges(\n graph,\n pos,\n ax=ax,\n edge_color=\"yellow\",\n edgelist=[e for e in graph.edges(data=\"data\") if isinstance(e[2], RequirementEdge)],\n )\n\n # define tool tip style when hovering\n # based on https://stackoverflow.com/questions/61604636/adding-tooltip-for-nodes-in-python-networkx-graph\n annot = ax.annotate(\n \"\",\n xy=(0, 0),\n xytext=(20, 20),\n textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\n arrowprops=dict(arrowstyle=\"->\"),\n )\n 
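# start with the tooltip hidden; the hover callback below makes it visible over a node\n    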
annot.set_visible(False)\n\n idx_to_node_dict = {}\n for idx, node in enumerate(graph.nodes):\n idx_to_node_dict[idx] = node\n\n def update_annot(ind, node_ids_list):\n node_idx = ind[\"ind\"][0]\n node_id = node_ids_list[node_idx]\n xy = pos[node_id]\n annot.xy = xy\n node_attr = {\"node\": node_id}\n node_attr.update(graph.nodes[node_id])\n text = data_at(graph, node_id).get_hover_text()\n annot.set_text(text)\n\n def hover(event):\n vis = annot.get_visible()\n if event.inaxes == ax:\n for node_type in node_insertion_sequence:\n nodes = nodes_lists[node_type]\n try:\n cont, ind = nodes.contains(event)\n if cont:\n update_annot(ind, node_ids[node_type])\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n except TypeError:\n pass\n\n fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n plt.show()\n","repo_name":"discopop-project/discopop","sub_path":"discopop_library/discopop_optimizer/utilities/visualization/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"72"} +{"seq_id":"72695704874","text":"from django.db.models import Q\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.contrib.auth import login\nfrom accounts.forms import SignUpForm, ProfileForm\nfrom accounts.tokens import token_generator\nfrom accounts.utils import SendEmail\nfrom accounts.models import LinkToken, Profile\nfrom course.models import Registration, PointHistory\n\n\ndef signup(request):\n if request.method == \"POST\":\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.is_active = False\n user.save()\n token = token_generator.make_token(user)\n LinkToken.objects.create(user=user, key=token, used_for=0)\n path = reverse(\"accounts:activate_account\", kwargs={\"token\": token})\n link = request.build_absolute_uri(path)\n SendEmail(user=user).account_activation(link=link)\n messages.success(\n request,\n \"Akun Anda berhasil dibuat, silahkan check email untuk melakukan konfirmasi (check folder spam, promotions, dll)\",\n )\n return redirect(\"accounts:login\")\n\n else:\n form = SignUpForm()\n return render(request, \"accounts/signup.html\", {\"form\": form})\n\n\ndef activate_account(request, **kwargs):\n try:\n key = kwargs[\"token\"]\n token = LinkToken.objects.get(key=key)\n except:\n token = None\n print(token)\n if token is not None and token.is_valid:\n token.user.is_active = True\n token.user.save()\n token.is_valid = False\n token.save()\n login(request, user=token.user)\n messages.success(request, \"Akun Anda sudah aktif!\")\n return redirect(\"home:home\")\n else:\n messages.error(\n request, \"Link activation sudah tidak valid, silahkan register ulang\"\n )\n return redirect(\"accounts:signup\")\n\n\n@login_required()\ndef profile(request):\n if request.method == \"POST\":\n form = ProfileForm(request.POST, instance=request.user.profile)\n if form.is_valid():\n form.save()\n messages.success(request, \"Akun Anda berhasil di update!\")\n return redirect(\"home:home\")\n else:\n form = ProfileForm(instance=request.user.profile)\n return render(request, \"accounts/update_profile.html\", {\"form\": form})\n\n\n@login_required\ndef affiliate_view(request):\n affiliate_id = request.user.profile.affiliate_id\n affiliate_point = 
request.user.profile.affiliate_point\n my_affiliate = Registration.objects.filter(affiliate_kode=affiliate_id)\n\n my_affiliate_code = request.user.profile.affiliate_id\n my_downlink = Registration.objects.filter(affiliate_kode=my_affiliate_code)\n my_downlink_fix = my_downlink.filter(Q(status=2) | Q(status=3))\n my_point_history = PointHistory.objects.filter(user=request.user)\n print(my_point_history)\n return render(\n request,\n \"accounts/affiliate_list.html\",\n {\n \"affiliate_id\": affiliate_id,\n \"affiliate_point\": affiliate_point,\n \"my_affiliate\": my_affiliate,\n \"my_downlink\": my_downlink_fix,\n \"my_point_history\": my_point_history,\n },\n )\n","repo_name":"ArRosid/idn-dashboard","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34060927169","text":"from django.core.management.base import BaseCommand\nfrom django.db.models import get_app, get_models\n\n\nclass Command(BaseCommand):\n help = 'Show all models in app and count objects for each of them'\n\n def handle(self, *args, **options):\n app = get_app('testapp')\n for model in get_models(app):\n self.stdout.write(\n 'In model \"{1}\" is {0} objects.'.format(\n model.objects.count(), model._meta.object_name))\n\n self.stderr.write(\n '{2} In model \"{1}\" is {0} objects.'.format(\n model.objects.count(), model._meta.object_name, 'error:'))\n","repo_name":"justnewbie/TestCase42","sub_path":"testapp/management/commands/objectscounting.py","file_name":"objectscounting.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18500654446","text":"from all_funcs import save_moon_trace\nfrom multiprocessing import Pool\nfrom timeit import default_timer as timer\n\n# print(\"Start Unparallel\")\n# start = timer()\n# all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon', 'Triton']\n# num_fieldlines = 0\n# num_orbit_points = 2\n\n# for moon in all_moons:\n# if moon == 'Triton':\n# num_orbits = 2\n# else:\n# num_orbits = 1\n# save_moon_trace(moon, num_orbit_points, num_orbits, num_fieldlines)\n\n# print(\"End Unparallel\")\n# end = timer()\n# print(end-start)\n\ndef run(moon):\n num_fieldlines = 0\n num_orbit_points = 500\n if moon == 'Triton':\n num_orbits = 2\n else:\n num_orbits = 1\n save_moon_trace(moon, num_orbit_points, num_orbits, num_fieldlines)\n\ndef main():\n all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon', 'Triton']\n print(\"Start Parallel\")\n start = timer()\n with Pool() as pool:\n pool.map(run, all_moons)\n end = timer()\n print(\"End Parallel\")\n print(end-start)\n\n# if __name__ == '__main__':\n# main()","repo_name":"jjwindow/PyFields","sub_path":"high_res_all_moons.py","file_name":"high_res_all_moons.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29660961323","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 30 20:12:34 2018\r\n\r\n@author: lijie\r\n\"\"\"\r\nfrom collections import deque\r\n\r\ndef bfs_ergodic(name):\r\n \r\n '''create a queue using the graph'''\r\n search_queue=deque()\r\n search_queue+=graph[name]\r\n searched=[]\r\n \r\n '''bfs implementation'''\r\n while search_queue:\r\n #pop the first persoon in the queue\r\n person=search_queue.popleft()\r\n #Skip the people we have checked\r\n if not 
person in searched:\r\n if name_ended_m(person):\r\n print(person,'is~')\r\n return True\r\n else:\r\n search_queue+=graph[person]\r\n searched.append(person)\r\n return False\r\n\r\n\r\n#judge if a person's name is ended with 'm'\r\ndef name_ended_m(person):\r\n return person[-1]=='m'\r\n \r\nif __name__=='__main__' :\r\n \r\n '''create a graph'''\r\n #graph is a hash table\r\n graph={}\r\n graph['you']=['alice','bob','claire']\r\n graph['alice']=['peggy']\r\n graph['bob']=['anuj','peggy']\r\n graph['claire']=['tomm','jonny']\r\n graph['anuj']=[]\r\n graph['peggy']=[]\r\n graph['tomm']=[]\r\n graph['jonny']=[]\r\n \r\n bfs_ergodic('you') ","repo_name":"Jerllina/GrokkingAlgorithms_LearningRecords","sub_path":"BreadthFirstSearch.py","file_name":"BreadthFirstSearch.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13024661802","text":"import pygame\nimport math\nimport time\nimport json\n\npygame.init()\n\npygame.display.set_caption(\"Checkers\")\n\nboardSize = 600\nuiSize = 120\nscreenSize = boardSize + uiSize\n\nscreen = pygame.display.set_mode((screenSize, screenSize))\n\nWHITE = (255, 255, 255)\n\nBLACK = (0, 0, 0)\n\nRED = (255, 0, 0)\n\nGRAY = (211, 211, 211)\n\nDARK_GRAY = (105, 105, 105)\n\nsquareSize = boardSize // 8\n\nbpImg = pygame.image.load('./graphics/black-piece.png')\nrpImg = pygame.image.load('./graphics/red-piece.png')\nbkImg = pygame.image.load('./graphics/black-king.png')\nrkImg = pygame.image.load('./graphics/red-king.png')\n\n\ngrid = [[None] * 8 for n in range(8)]\ngameInProgress = False\ncurrentTurn = \"b\"\nselectedChecker = [None, None]\ncapturedPieces = {\"rp\": 0, \"bp\": 0, \"rk\": 0, \"bk\": 0}\nwinner = \"\"\n\n#settings will be put in JSON file and be able to be modified in game later\nforcedCapture = False\nflyingKings = True\nforcedJumps = True\n# config = json.loads(open(\"config.json\", \"r+\"))\n\n#Utility Functions:\n\n#Draw text to the screen \ndef drawTextObject(text, fontSize, color, center, font = \"freesansbold.ttf\"):\n\tfontRenderer = pygame.font.Font(font, fontSize)\n\ttextSurface = fontRenderer.render(text, True, color)\n\ttextRect = textSurface.get_rect()\n\ttextRect.center = center\n\tscreen.blit(textSurface, textRect)\n\ndef drawPieceImg(pieceType, x, y, resizeW = None, resizeH = None):\n\tpieceImg = None\n\tif pieceType is not None:\n\t\tif pieceType == \"bp\":\n\t\t\tpieceImg = bpImg\n\t\telif pieceType == \"rp\":\n\t\t\tpieceImg = rpImg\n\t\telif pieceType == \"bk\":\n\t\t\tpieceImg = bkImg\n\t\telif pieceType == \"rk\":\n\t\t\tpieceImg = rkImg\n\t\tif pieceImg and resizeW and resizeH:\n\t\t\tpieceImg = pygame.transform.scale(pieceImg, (resizeW, resizeH))\n\t\tscreen.blit(pieceImg, (x, y))\n\n#Draws a rect to the screen that can be a simple rect or a button with an onclick action\n#Required Parameters: [x, y, w, h, color, ]\n#Optional Parameters: [text, textSize, textColor, font, borderSize, borderColor, action]\ndef rect(args):\n\tif \"action\" in args:\n\t\tmouse = pygame.mouse.get_pos()\n\t\tclick = pygame.mouse.get_pressed()\n\t\tif args[\"x\"] + args[\"w\"] > mouse[0] > args[\"x\"] and args[\"y\"] + args[\"h\"] > mouse[1] > args[\"y\"]:\n\t\t\tif click[0] == 1:\n\t\t\t\targs[\"action\"]() \n\n\tif \"borderColor\" in args and \"borderSize\" in args:\n\t\tborder = pygame.Rect(args[\"x\"] - args[\"borderSize\"], args[\"y\"] - args[\"borderSize\"], args[\"w\"] + (args[\"borderSize\"] * 2), args[\"h\"] + (args[\"borderSize\"] * 
2))\n\t\tpygame.draw.rect(screen, args[\"borderColor\"], border)\n\n\tbox = pygame.Rect(args[\"x\"], args[\"y\"], args[\"w\"], args[\"h\"])\n\tpygame.draw.rect(screen, args[\"color\"], box)\n\n\tif \"text\" in args:\n\t\tif not \"textSize\" in args:\n\t\t\targs[\"textSize\"] = 16\n\t\tif not \"textColor\" in args:\n\t\t\targs[\"textColor\"] = BLACK\n\t\tif not \"font\" in args:\n\t\t\targs[\"font\"] = \"freesansbold.ttf\"\n\t\tdrawTextObject(args[\"text\"], args[\"textSize\"], args[\"textColor\"], box.center, args[\"font\"])\n\n#Initializes a new game by emptying the grid and setting the pieces\ndef startNewGame():\n\tglobal gameInProgress \n\tglobal currentTurn\n\tglobal grid\n\tglobal capturedPieces\n\tgrid = [[None] * 8 for n in range(8)]\n\tcurrentTurn = \"b\"\n\tcapturedPieces = {\"rp\": 0, \"bp\": 0, \"rk\": 0, \"bk\": 0}\n\tfor y in range(len(grid)):\n\t\tfor x in range(len(grid[y])):\n\t\t\tif y < 3 and not (x + y) % 2:\n\t\t\t\tgrid[y][x] = \"bp\"\n\t\t\telif y > 4 and not (x + y) % 2:\n\t\t\t\tgrid[y][x] = \"rp\"\n\tgameInProgress = True\n\n#Draws the checkerboard pattern of the grid\ndef drawGrid():\n\tfor y in range(len(grid)):\n\t\tfor x in range(len(grid[y])):\n\t\t\tcolor = RED if (x + y) % 2 else BLACK\n\t\t\trect({\"x\": x * squareSize, \"y\": y * squareSize, \"w\": squareSize, \"h\": squareSize, \"color\": color})\n\t\t\t\n\n#Draws the pieces using the coordinates of the pieces set in grid\t\ndef drawPieces():\n\tif gameInProgress:\n\t\tfor y in range(len(grid)):\n\t\t\tfor x in range(len(grid[y])):\n\t\t\t\tpiece = grid[y][x]\n\t\t\t\tdrawPieceImg(piece, (x * squareSize) - 1, (y * squareSize) - 1, squareSize, squareSize)\n#Draws the UI \ndef drawUI():\n\t#TODO: Add save and load game buttons \n\t#TODO: Add settings (flying kings, forced capture)\n\t#TODO: Add forfeit button\n\t#Draw newgame button\n\tbtnNGWidth = 100\n\tbtnNGHeight = 35\n\tbtnNGX = (boardSize / 2) - (btnNGWidth / 2)\n\tbtnNGY = boardSize + ((screenSize - boardSize) / 2) - (btnNGHeight / 2)\n\trect({\"x\": btnNGX, \"y\": btnNGY, \"w\": btnNGWidth, \"h\": btnNGHeight, \"color\": GRAY, \"text\": \"New Game\", \"borderSize\": 2, \"borderColor\": BLACK, \"action\": startNewGame})\n\t#All UI other than New Game button should be displayed after game starts\n\tif gameInProgress:\n\t#Draw current turn\n\t\tdrawTextObject(\"Current Turn: \", 16, BLACK, ((boardSize + (screenSize - boardSize) / 2), 50))\n\t\tcolorRectX = (boardSize + (screenSize - boardSize) / 2) - 25\n\t\tcolorRectY = 75\n\t\tcolor = RED if currentTurn == \"r\" else BLACK\n\t\trect({\"x\": colorRectX, \"y\": colorRectY, \"w\": 50, \"h\": 50, \"color\": color})\n\t#Draw captured piece box\n\t\tdrawTextObject(\"Captured \", 16, BLACK, ((boardSize + (screenSize - boardSize) / 2), 150))\n\t\tdrawTextObject(\"Pieces: \", 16, BLACK, ((boardSize + (screenSize - boardSize) / 2), 170))\n\t\tcapturedBoxX = (boardSize + (screenSize - boardSize) / 2) - 43\n\t\trect({\"x\": capturedBoxX, \"y\": 202, \"w\": 86, \"h\": 396, \"color\": GRAY, \"borderSize\": 2, \"borderColor\": DARK_GRAY})\n\t\n\t#Draw captured pieces\n\t\tcapturedPiecesX = (boardSize + (screenSize - boardSize) / 2)\n\t\tfor pieceType, pieceCount in capturedPieces.items():\n\t\t\tif pieceCount > 0:\n\t\t\t\tpieceY = 200 if pieceType == \"rp\" else 510 \n\t\t\t\tdrawPieceImg(pieceType, capturedPiecesX - 35, pieceY, 70, 70)\n\t\t\tif pieceCount > 1: \n\t\t\t\ttextY = 285 if pieceType == \"rp\" else 590\n\t\t\t\tdrawTextObject(\"x\" + str(pieceCount), 16, BLACK, (capturedPiecesX, textY))\n#Handles 
movement of pieces\ndef handleMovement(event):\n\tglobal grid\n\tglobal selectedChecker\n\tglobal currentTurn\n\tglobal capturedPieces\n\tif gameInProgress:\n\t\t#Make sure their selection is within the board\n\t\tif boardSize > pygame.mouse.get_pos()[0] > 0 and boardSize > pygame.mouse.get_pos()[1] > 0:\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n\t\t\t\tselectedChecker[0] = math.ceil(pygame.mouse.get_pos()[0] / squareSize) - 1\n\t\t\t\tselectedChecker[1] = math.ceil(pygame.mouse.get_pos()[1] / squareSize) - 1\n\t\t\t\t#If the checker is owned by the current turn\n\t\t\t\tif grid[selectedChecker[1]][selectedChecker[0]] == currentTurn + \"p\" or grid[selectedChecker[1]][selectedChecker[0]] == currentTurn + \"k\":\n\t\t\t\t\t#This will be used later for other stuff that is done on mousedown\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tselectedChecker = [None, None]\n\t\t\t#If the mousebutton is being released\n\t\t\tif event.type == pygame.MOUSEBUTTONUP and selectedChecker != [None, None]:\n\t\t\t\tmoveToX = math.ceil(pygame.mouse.get_pos()[0] / squareSize) - 1\n\t\t\t\tmoveToY = math.ceil(pygame.mouse.get_pos()[1] / squareSize) - 1\n\t\t\t\t#If it wasn't dropped on the same spot\n\t\t\t\tif moveToX != selectedChecker[0] and moveToY != selectedChecker[1]:\n\t\t\t\t\t#If the square the checker is moving to is on the black squares\n\t\t\t\t\tif not (moveToX + moveToY) % 2:\n\t\t\t\t\t\tvalidMove = False\n\t\t\t\t\t\txOffset = 0\n\t\t\t\t\t\tyOffset = 0\n\t\t\t\t\t\t#If the piece is not trying to capture one of its own\n\t\t\t\t\t\tif grid[moveToY][moveToX] != currentTurn + \"p\" and grid[moveToY][moveToX] != currentTurn + \"k\":\n\t\t\t\t\t\t\t#If the selected piece is a normal piece\n\t\t\t\t\t\t\tif grid[selectedChecker[1]][selectedChecker[0]][-1] == \"p\":\n\t\t\t\t\t\t\t\t#If there is no piece at the square the checker is moving to\n\t\t\t\t\t\t\t\t#Or if forcedJumps is disabled it doesnt matter if there is a piece\n\t\t\t\t\t\t\t\tif grid[moveToY][moveToX] == None or not forcedJumps:\n\t\t\t\t\t\t\t\t\t#Depending on the current turn it checks if the checker is moving to\n\t\t\t\t\t\t\t\t\t# The two diagonal spots in front of it\t\n\t\t\t\t\t\t\t\t\tif currentTurn == \"r\":\n\t\t\t\t\t\t\t\t\t\tif (selectedChecker[0] + 1 == moveToX or selectedChecker[0] - 1 == moveToX) and selectedChecker[1] - 1 == moveToY:\n\t\t\t\t\t\t\t\t\t\t\tvalidMove = True\n\t\t\t\t\t\t\t\t\telif currentTurn == \"b\":\n\t\t\t\t\t\t\t\t\t\tif (selectedChecker[0] + 1 == moveToX or selectedChecker[0] - 1 == moveToX) and selectedChecker[1] + 1 == moveToY:\n\t\t\t\t\t\t\t\t\t\t\tvalidMove = True\n\t\t\t\t\t\t\t\t#If there is a piece at the spot the checker is trying to move to and the checker has to jump\n\t\t\t\t\t\t\t\tif forcedJumps and grid[moveToY][moveToX]:\n\t\t\t\t\t\t\t\t\t#Determine the X and Y offset which is the direction the spot is to the position of the checker\n\t\t\t\t\t\t\t\t\tif moveToX > selectedChecker[0]:\n\t\t\t\t\t\t\t\t\t\txOffset = 1\n\t\t\t\t\t\t\t\t\tif moveToX < selectedChecker[0]:\n\t\t\t\t\t\t\t\t\t\txOffset = -1 \n\t\t\t\t\t\t\t\t\tif moveToY > selectedChecker[1]:\n\t\t\t\t\t\t\t\t\t\tyOffset = 1\n\t\t\t\t\t\t\t\t\tif moveToY < selectedChecker[1]:\n\t\t\t\t\t\t\t\t\t\tyOffset = -1\n\t\t\t\t\t\t\t\t\t#If the spot after the spot the checker is moving to is on the board\n\t\t\t\t\t\t\t\t\tif 7 > moveToX + xOffset > 0 and 7 > moveToY + yOffset > 0:\n\t\t\t\t\t\t\t\t\t\t#If the spot after the spot the checker is moving to is empty\n\t\t\t\t\t\t\t\t\t\tif not 
grid[moveToY + yOffset][moveToX + xOffset]:\n\t\t\t\t\t\t\t\t\t\t\tmoveToY = moveToY + yOffset\n\t\t\t\t\t\t\t\t\t\t\tmoveToX = moveToX + xOffset\n\t\t\t\t\t\t\t\t\t\t\tvalidMove = True\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t#If the spot is off the board then \n\t\t\t\t\t\t\t\t\t\t#its ok to capture the piece even if its not technically jumping\n\t\t\t\t\t\t\t\t\t\tvalidMove = True\n\t\t\t\t\t\t\t#If the piece is a king\n\t\t\t\t\t\t\telif grid[selectedChecker[1]][selectedChecker[0]][-1] == \"k\":\n\t\t\t\t\t\t\t\t#If the settings allow kings to go any distance instead of just 1 square\n\t\t\t\t\t\t\t\tif flyingKings:\n\t\t\t\t\t\t\t\t\txOffset = 0\n\t\t\t\t\t\t\t\t\tyOffset = 0\n\t\t\t\t\t\t\t\t\tif moveToX > selectedChecker[0]:\n\t\t\t\t\t\t\t\t\t\txOffset = 1\n\t\t\t\t\t\t\t\t\tif moveToX < selectedChecker[0]:\n\t\t\t\t\t\t\t\t\t\txOffset = -1 \n\t\t\t\t\t\t\t\t\tif moveToY > selectedChecker[1]:\n\t\t\t\t\t\t\t\t\t\tyOffset = 1\n\t\t\t\t\t\t\t\t\tif moveToY < selectedChecker[1]:\n\t\t\t\t\t\t\t\t\t\tyOffset = -1 \n\t\t\t\t\t\t\t\t\t#If the king is moving on a diagonal\n\t\t\t\t\t\t\t\t\tif xOffset != 0 and yOffset != 0:\n\t\t\t\t\t\t\t\t\t\t#Check if there is a piece in the way\n\t\t\t\t\t\t\t\t\t\tx = selectedChecker[0] + xOffset\n\t\t\t\t\t\t\t\t\t\tfor y in range(selectedChecker[1] + yOffset, moveToY):\n\t\t\t\t\t\t\t\t\t\t\tif grid[y][x] != None:\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\tx += xOffset\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tif (selectedChecker[0] + 1 == moveToX or selectedChecker[0] - 1 == moveToX) and (selectedChecker[1] - 1 == moveToY or selectedChecker[1] + 1 == moveToY):\n\t\t\t\t\t\t\t\t\t\tvalidMove = True\n\t\t\t\t\t\tif validMove:\n\t\t\t\t\t\t\t#If there is an opponent piece in the spot the selected piece is moving to, and forcedJumps = False\n\t\t\t\t\t\t\t#Or if the spot to the left/right of the opponent piece is within boundaries and forcedJumps\n\t\t\t\t\t\t\tif grid[moveToY][moveToX] and (not forcedJumps or ((7 > moveToX + xOffset > 0 and 7 > moveToY + yOffset > 0) and forcedJumps)):\n\t\t\t\t\t\t\t\tcapturedPieces[grid[moveToY][moveToX]] += 1\n\t\t\t\t\t\t\t\tgrid[moveToY][moveToX] = None\n\t\t\t\t\t\t\telif not grid[moveToY][moveToX] and grid[selectedChecker[1]][selectedChecker[0]][-1] == \"p\" and grid[moveToY - yOffset][moveToX - xOffset]:\n\t\t\t\t\t\t\t\tcapturedPieces[grid[moveToY - yOffset][moveToX - xOffset]] += 1\n\t\t\t\t\t\t\t\tgrid[moveToY - yOffset][moveToX - xOffset] = None\n\t\t\t\t\t\t\tif currentTurn == \"r\" and moveToY == 0:\n\t\t\t\t\t\t\t\tgrid[moveToY][moveToX] = \"rk\"\n\t\t\t\t\t\t\telif currentTurn == \"b\" and moveToY == 7:\n\t\t\t\t\t\t\t\tgrid[moveToY][moveToX] = \"bk\"\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tgrid[moveToY][moveToX] = grid[selectedChecker[1]][selectedChecker[0]]\n\t\t\t\t\t\t\tgrid[selectedChecker[1]][selectedChecker[0]] = None\n\t\t\t\t\t\t\tcurrentTurn = \"r\" if currentTurn == \"b\" else \"b\"\n\t\t\t\tselectedChecker = [None, None]\n\t\t\t\n\t\t\t# if event.type == pygame.MOUSEMOTION and selectedChecker != [None, None]:\n\t\t\t# \tprint(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n\t\t\t# \tdrawPieceImg(grid[selectedChecker[1]][selectedChecker[0]], pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1], squareSize, squareSize)\n\nrunning = True\nclock = pygame.time.Clock()\nwhile running:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\trunning = False\n\t\tif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n\t\t\trunning = 
False\n\t\tif event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.MOUSEBUTTONUP: #or pygame.MOUSEMOTION:\n\t\t\thandleMovement(event)\n\tscreen.fill(WHITE)\n\tdrawGrid()\n\tdrawPieces()\n\tdrawUI()\n\tpygame.display.flip()\n\tclock.tick(15)\npygame.quit()","repo_name":"PixelSoftwareStudios/PyCheckers","sub_path":"checkers.py","file_name":"checkers.py","file_ext":"py","file_size_in_byte":11982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26659340966","text":"from sqlalchemy import orm\nfrom sqlalchemy import create_engine\nfrom wallet.entity.wallet_entity import WalletEntity\nfrom sqlalchemy.orm import mapper\nfrom sqlalchemy import Table, MetaData, Column , String\nimport os\n\n\nclass WalletService:\n\n def __init__(self):\n self._engine = create_engine('mysql+pymysql://%s:%s@%s/%s' %(os.environ['DB_USERNAME'],os.environ['DB_PASSWORD'],os.environ['DB_ENDPOINT_ADDRESS'],os.environ['DB_NAME']))\n self._session = orm.Session(bind=self._engine)\n wallet = Table('wallet', MetaData(),\n Column('uuid', String, primary_key=True),\n Column('user_id')\n )\n mapper(WalletEntity, wallet)\n\n def list(self):\n return self._session.query(WalletEntity).all()\n\n def create(self, wallet_entity):\n self._session.add(wallet_entity)\n self._session.commit()\n return wallet_entity\n\n def delete(self, uuid):\n wallet_entity = self._session.query(WalletEntity).get(uuid)\n if(wallet_entity == None):\n raise Exception(\"No wallet found for %s \" % (uuid))\n self._session.delete(wallet_entity)\n self._session.commit()\n return wallet_entity\n\n\n","repo_name":"momokeith/wallet-ld","sub_path":"wallet/service/wallet_service.py","file_name":"wallet_service.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"51127959","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport six\n\nif six.PY2:\n import mock\nelse:\n import unittest.mock as mock\n\nfrom sqlalchemy.orm import Session, Query\n\nfrom search.priemka.yappy.sqla.yappy import model\nfrom search.priemka.yappy.src.yappy_lib.queue.exceptions import DBPartitionException\nfrom search.priemka.yappy.src.yappy_lib.queue.partitioned import (\n PartitionedDBSink,\n PartitionedDBSource,\n PartitionedMessage,\n)\nfrom search.priemka.yappy.tests.test_yappy_lib.test_queue.test_base import (\n _SourceQueueTestCase as SourceQueueTestCase,\n SinkQueueTestCase,\n)\n\n\nclass PartitionedSinkTestCase(SinkQueueTestCase):\n queue = None # type: PartitionedDBSink\n\n class TestSink(PartitionedDBSink):\n MODEL = model.Beta\n\n @classmethod\n def set_up_queue(cls):\n with mock.patch.object(cls.TestSink.partitions_manager, 'start'):\n cls.queue = cls.TestSink()\n\n def setUp(self):\n super(PartitionedSinkTestCase, self).setUp()\n self.cluster_revision_patch = mock.patch.object(\n self.queue.partitions_manager.__class__,\n 'cluster_revision',\n new_callable=mock.PropertyMock,\n )\n self.cluster_revision = self.cluster_revision_patch.start()\n self.commit_patch = mock.patch.object(self.queue, 'commit_changes')\n self.commit = self.commit_patch.start()\n\n self.addCleanup(self.cluster_revision_patch.stop)\n self.addCleanup(self.commit_patch.stop)\n\n def test_verify_revision_false(self):\n self.cluster_revision.return_value = 3\n msg = PartitionedMessage('content', revision=2)\n 
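# The mocked cluster revision is 3, so this revision-2 message must be rejected\n 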
self.assertFalse(self.queue.verify_message_revision(msg))\n\n def test_verify_revision_true(self):\n self.cluster_revision.return_value = 2\n msg = PartitionedMessage('content', revision=2)\n self.assertTrue(self.queue.verify_message_revision(msg))\n\n def test_save_message_dont_save_if_not_verified(self):\n with mock.patch.object(self.queue, 'verify_message_revision', return_value=False):\n self.queue._save_message(PartitionedMessage('content', revision=2))\n self.save_message.assert_not_called()\n\n def test_save_message(self):\n manager = mock.Mock()\n manager.attach_mock(self.save_message, 'save')\n manager.attach_mock(self.commit, 'commit')\n msg = PartitionedMessage('content', revision=2)\n with mock.patch.object(self.queue, 'verify_message_revision', return_value=True):\n self.queue._save_message(msg)\n manager.assert_has_calls([mock.call.save(msg, mock.ANY), mock.call.commit(mock.ANY, msg)])\n\n def test_commit_not_verified(self):\n self.commit_patch.stop()\n session = mock.Mock(Session)\n with mock.patch.object(self.queue, 'verify_message_revision', return_value=False):\n committed = self.queue.commit_changes(session, PartitionedMessage('content', revision=2))\n self.assertFalse(committed)\n\n def test_commit_verified(self):\n self.commit_patch.stop()\n session = mock.Mock(Session)\n with mock.patch.object(self.queue, 'verify_message_revision', return_value=True):\n committed = self.queue.commit_changes(session, PartitionedMessage('content', revision=2))\n self.assertTrue(committed)\n\n\nclass PartitionedSourceTestCase(SourceQueueTestCase):\n queue = None # type: PartitionedDBSource\n\n class DummyPartitionException(DBPartitionException):\n \"\"\" Dummy exception to use in tests \"\"\"\n\n class TestSource(PartitionedDBSource):\n MODEL = model.Beta\n\n @classmethod\n def set_up_queue(cls):\n with mock.patch.object(cls.TestSource.partitions_manager, 'start'):\n cls.queue = cls.TestSource()\n\n def setUp(self):\n super(PartitionedSourceTestCase, self).setUp()\n self.cluster_revision_patch = mock.patch.object(\n self.queue.partitions_manager.__class__,\n 'cluster_revision',\n new_callable=mock.PropertyMock,\n )\n self.cluster_revision = self.cluster_revision_patch.start()\n self.apply_filters_patch = mock.patch.object(self.queue.partitions_manager, 'apply_partition_filters')\n self.apply_filters = self.apply_filters_patch.start()\n\n self.addCleanup(self.cluster_revision_patch.stop)\n self.addCleanup(self.apply_filters_patch.stop)\n\n def test_poll_partitioning_error(self):\n self.get_data.side_effect = self.DummyPartitionException\n expected = []\n self.assertEqual(self.queue.poll(1), expected)\n\n def test_poll_message_revision(self):\n self.cluster_revision.return_value = 7\n self.get_data.return_value = ['content-1', 'content-2']\n result = self.queue.poll(10)\n revisions = [msg.revision for msg in result]\n self.assertEqual(revisions, [7] * len(self.get_data.return_value))\n\n def test_get_query(self):\n q = mock.Mock(Query)\n filtered_q = mock.Mock(Query)\n session = mock.Mock(Session)\n self.get_query.return_value = q\n def apply_filters(q_):\n if q == q_:\n return filtered_q\n self.apply_filters.side_effect = apply_filters\n result = self.queue._get_query(session)\n self.assertEqual(result, filtered_q)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"Search 
engine/test_queue/test_partitioned.py","file_name":"test_partitioned.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37040761218","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\n\nimport os\nfrom tqdm import tqdm\nfrom visdom import Visdom\nimport numpy as np\n\nfrom torch.autograd import Variable\nfrom torch.autograd import Function\nimport torch.backends.cudnn as cudnn\n\nfrom dataset import SiameseWhaleDataset\nfrom model import SiameseNetwork\nfrom config import Configure\nfrom utils import PairwiseDistance, DlibLoss, ContrastiveLoss\n\n\n# Global config\nconfig = Configure()\n\ntransform = transforms.Compose([\n # Scale(96),\n transforms.Resize((224, 224)),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize(mean = [ 0.5, 0.5, 0.5 ],\n std = [ 0.5, 0.5, 0.5 ])\n ])\n\n# Train_dir\n# Number of training example: n_batch * n_cls * n_same\ntrain_dir = SiameseWhaleDataset(config.dataroot, config.n_batch, config.n_cls, config.n_same, transform)\ntrain_loader = torch.utils.data.DataLoader(train_dir,\n batch_size=config.n_cls * config.n_same, shuffle=False)\n\nl2_dist = PairwiseDistance(2)\n\ndef main():\n global plotter\n plotter = VisdomLinePlotter(env_name=config.visdom_name)\n \n # instantiate model and initialize weights\n model = SiameseNetwork()\n if config.cuda:\n model.cuda()\n\n optimizer = create_optimizer(model, config.lr)\n\n # optionally resume from a checkpoint\n if config.resume:\n if os.path.isfile(config.resume):\n print('=> loading checkpoint {}'.format(config.resume))\n checkpoint = torch.load(config.resume)\n config.start_epoch = checkpoint['epoch']\n checkpoint = torch.load(config.resume)\n model.load_state_dict(checkpoint['state_dict'])\n else:\n print('=> no checkpoint found at {}'.format(config.resume))\n\n start = config.start_epoch\n end = start + config.epochs\n\n for epoch in range(start, end):\n train(train_loader, model, optimizer, epoch)\n\ndef train(train_loader, model, optimizer, epoch):\n model.train()\n # loss_function = DlibLoss()\n loss_function = ContrastiveLoss()\n # pbar = tqdm(enumerate(train_loader))\n\n for batch_idx, (data_a, data_p, c) in enumerate(train_loader):\n data_a, data_p, c = data_a.cuda(), data_p.cuda(), c.cuda()\n data_a, data_p, c = Variable(data_a), Variable(data_p), Variable(c)\n\n out_a, out_p = model(data_a), model(data_p)\n loss = loss_function(out_a, out_p, c)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # update the optimizer learning rate\n adjust_learning_rate(optimizer)\n \n plotter.plot('loss', 'train', epoch * config.n_batch + batch_idx, loss.data[0])\n\n if (epoch * config.n_batch + batch_idx) % config.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch,\n batch_idx * len(data_a),\n len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader),\n loss.data[0]\n ))\n torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict()},\n '{}/checkpoint_{}.pth'.format(config.log_dir, epoch))\n\ndef adjust_learning_rate(optimizer):\n \"\"\"Updates the learning rate given the learning rate decay.\n The routine has been implemented according to the original Lua SGD optimizer\n \"\"\"\n for group in optimizer.param_groups:\n if 'step' not in group:\n group['step'] = 0\n group['step'] += 1\n\n group['lr'] = config.lr / (1 + group['step'] * config.lr_decay)\n\ndef create_optimizer(model, new_lr):\n # setup optimizer\n if config.optimizer == 'sgd':\n optimizer = optim.SGD(model.parameters(), lr=new_lr,\n momentum=0.9, dampening=0.9,\n weight_decay=config.wd)\n elif config.optimizer == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=new_lr,\n weight_decay=config.wd)\n elif config.optimizer == 'adagrad':\n optimizer = optim.Adagrad(model.parameters(),\n lr=new_lr,\n lr_decay=config.lr_decay,\n weight_decay=config.wd)\n return optimizer\n\nclass VisdomLinePlotter(object):\n \"\"\"Plots to Visdom\"\"\"\n def __init__(self, env_name='main'):\n self.viz = Visdom()\n self.env = env_name\n self.plots = {}\n def plot(self, var_name, split_name, x, y):\n if var_name not in self.plots:\n self.plots[var_name] = self.viz.line(X=np.array([x,x]), Y=np.array([y,y]), env=self.env, opts=dict(\n legend=[split_name],\n title=var_name,\n xlabel='steps',\n ylabel=var_name\n ))\n else:\n self.viz.updateTrace(X=np.array([x]), Y=np.array([y]), env=self.env, win=self.plots[var_name], name=split_name)\n\nif __name__ == '__main__':\n main()\n","repo_name":"ngxbac/siamese_network","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17106779840","text":"from contextlib import ExitStack\nfrom os import linesep\nfrom pathlib import Path\nfrom typing import Iterable\n\n\ndef paste(*lines: Iterable[str], delimiter: str = \"\\t\"):\n return (\n linesep.join(map(lambda l: delimiter.join(map(lambda w: w.strip(), l)), zip(*lines)))\n + linesep\n )\n\n\nif __name__ == \"__main__\":\n filename = str((Path(__file__) / \"../../hightemp.txt\").resolve())\n\n workdir = Path(__file__).parent\n with ExitStack() as stack:\n col1, col2 = map(\n lambda i: stack.enter_context(\n open(str((workdir / f\"../_12/col{i}.txt\").resolve()), \"r\")\n ),\n [1, 2],\n )\n\n file = stack.enter_context(open(str((workdir / \"result.txt\").resolve()), \"w\"))\n\n file.write(paste(col1, col2))\n","repo_name":"kamiazya/nlp100","sub_path":"src/ch2/_13/paste.py","file_name":"paste.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25759197030","text":"# import complex math module\nimport cmath\n\ndef calculate_equation_roots(a,b,c):\n \"\"\"\n calculate the roots of equation ax^2 + bx + c\n :param a: equation ax^2 + bx + c\n :param b: equation ax^2 + bx + c\n :param c: equation ax^2 + bx + c\n :return: the roots of equation ax^2 + bx + c or empty array if equation has no real roots\n \"\"\"\n \n a = float(a)\n b = float(b)\n c = float(c)\n \n # calculate the discriminant\n d = (b**2) - (4*a*c)\n \n x1 = (-b+cmath.sqrt(d))/(2*a)\n x2 = (-b-cmath.sqrt(d))/(2*a)\n\n # return null if the imaginary part of x1 and x2 are != 0.0\n if x1.imag != 0.0 and x2.imag != 0.0:\n return []\n \n # return the solutions\n return [x1.real, 
x2.real]\n\nprint(calculate_equation_roots(4,3,2))","repo_name":"wellingtonj1/Clarimar-AED1","sub_path":"exerc2.py","file_name":"exerc2.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26340178026","text":"import altair as alt\nfrom pathlib import Path\nimport pandas as pd\nfrom vega_datasets import data\n\n\n# load data from local computer\nairport_data = Path('/Users/oluwadara/PycharmProjects/scientificProject/airports.dat.txt')\nairline_data = Path('/Users/oluwadara/PycharmProjects/scientificProject/airlines.dat.txt')\nroute_data = Path('/Users/oluwadara/PycharmProjects/scientificProject/routes.dat.txt')\n\n\n# load data into dataframe`````\nairline_df = pd.read_csv(airline_data, header=None, names=['airlineId', 'name',\n 'alias', 'IATA', 'ICAO',\n 'callsign', 'country_airline', 'active'])\nairport_df = pd.read_csv(airport_data, header=None, names=['airportId', 'airportName', 'city',\n 'country_airport', 'IATA', 'ICAO',\n 'lat', 'long', 'altitude',\n 'timezone', 'dst', 'tbase',\n 'type', 'source'])\nairport_df['altitude'] = pd.to_numeric(airport_df['altitude'], errors='coerce')\nroute_df = pd.read_csv(route_data, header=None, names=['airlineCode', 'airlineId',\n 'sourcePort', 'sourcePortId',\n 'destPort', 'destPortId',\n 'codeShare', 'stops', 'equip'])\n\ncountries = alt.topo_feature(data.world_110m.url, 'countries')\n\n# panda settings to display all columns in console\npd.set_option('display.expand_frame_repr', False)\n\n# disable the default 5000 rows limit\nalt.data_transformers.disable_max_rows()\n\n# convert airportId from int to string.\nairline_df['airlineId'] = airline_df['airlineId'].astype(str)\nairport_df['airportId'] = airport_df['airportId'].astype(str)\n\n# merged three data together.\nmerged_df = pd.merge(airline_df, route_df, on='airlineId', how='inner')\nmerged_merged_df = pd.merge(merged_df, airport_df,\n left_on='sourcePortId', right_on='airportId', how='inner')\n\n# states = alt.topo_feature(countries, 'countries')\nsource = alt.topo_feature(data.world_110m.url, 'countries')\n\n# Define a color scale based on altitude\ncolor_scale = alt.Scale(\n domain=(0, merged_merged_df['altitude'].max()),\n)\n\nbackground = alt.Chart(source).mark_geoshape(\n color=alt.Color('altitude:Q', scale=color_scale),\n fill=alt.Color('country_airport:N', scale=alt.Scale(scheme='reds')),\n stroke='white',\n).project('naturalEarth1').properties(\n width=1100,\n height=700,\n title='DISTRIBUTION OF AIRPORTS IN THE WORLD'\n).properties(width=500, height=300)\n\npoints = alt.Chart(merged_merged_df).mark_circle().encode(\n longitude='long:Q',\n latitude='lat:Q',\n size=alt.value(10),\n tooltip=['airportName', 'city', 'country_airport'],\n color=alt.Color('altitude:Q', scale=color_scale, legend=None)\n)\n\nchart = background + points\nchart.show()\n\n\n\n\n\n","repo_name":"Adebobola-Oluwadara/Data-Analysis-AirportData","sub_path":"data/Vis2.py","file_name":"Vis2.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18481636054","text":"import pymongo\nfrom pymongo import MongoClient\nfrom dotenv import load_dotenv\nimport os\nfrom pprint import pprint\nimport json\nimport sys\n\nload_dotenv()\nmongo_host = os.getenv(\"MONGO_HOST\")\nmongo_port = os.getenv(\"MONGO_PORT\")\nclient = MongoClient(mongo_host, int(mongo_port))\n\ntype_table = {\n \"float\": \"number\",\n \"str\": \"string\",\n 
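# int also maps to number; this table makes no separate integer/number distinction\n 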
\"int\": \"number\",\n \"bool\": \"boolean\",\n \"ObjectId\": \"string\",\n \"datetime\": \"string\",\n \"NoneType\": \"string\",\n}\n\n\ndef translate(value, table):\n if not table:\n return value\n for key, _value in table.items():\n if value == key:\n value = _value\n break\n return value\n\n\ndef handle_scalar(value):\n raw_type = type(value).__name__\n translated_type = translate(raw_type, type_table)\n field_schema = {\"type\": translated_type}\n if raw_type == \"datetime\":\n field_schema.update(format=\"date-time\")\n return field_schema\n\n\ndef build_schema(doc):\n out = {\"type\": \"object\", \"properties\": {}}\n properties = out.get(\"properties\")\n for key, value in doc.items():\n if type(value) == dict:\n properties.update({key: build_schema(value)})\n elif type(value) == list:\n if not value:\n continue\n if type(value[0]) not in [list, dict]:\n field_schema = handle_scalar(value[0])\n properties.update({key: field_schema})\n else:\n properties.update(\n {key: {\"type\": \"array\", \"items\": build_schema(value[0])}}\n )\n else:\n field_schema = handle_scalar(value)\n properties.update({key: field_schema})\n return out\n\n\ndef generate_catalog(database_name, *selected_collections):\n database = getattr(client, database_name)\n stream = []\n if not selected_collections:\n selected_collections = database.list_collection_names()\n print(selected_collections)\n for collection in selected_collections:\n c = getattr(database, collection)\n if not c.count_documents({}):\n continue\n r = c.find().sort([(\"date_created\", pymongo.DESCENDING)])[0]\n schema = build_schema(r)\n pprint(schema)\n stream_entry = {\n \"table_name\": collection,\n \"stream\": collection,\n \"metadata\": [\n {\n \"breadcrumb\": [],\n \"metadata\": {\n \"table-key-properties\": [\"_id\"],\n \"database-name\": database_name,\n \"row-count\": c.count_documents({}),\n \"is-view\": False,\n \"valid-replication-keys\": [\"_id\"],\n \"selected\": True,\n \"replication-method\": \"LOG_BASED\",\n },\n }\n ],\n \"tap_stream_id\": f\"{database_name}-{collection}\",\n \"schema\": schema,\n \"selected\": True,\n }\n stream.append(stream_entry)\n extractor_catalog_path = os.path.join(\n os.getcwd(), database_name, \"extract\", \"tap-mongodb.catalog.json\"\n )\n try:\n with open(extractor_catalog_path, \"w\") as e:\n catalog = {\"stream\": stream}\n e.write(json.dumps(catalog))\n except:\n pass\n\n\ndef generate_catalogs(database_name, *selected_collections):\n print(database_name)\n if not database_name or database_name == \"__all__\":\n dbs = client.list_database_names()\n for db in dbs:\n if \"sendbox\" in db:\n generate_catalog(db)\n else:\n generate_catalog(database_name, *selected_collections)\n\n\ndef delete_catalogs(*database_names):\n if len(database_names) == 1 and database_names[0] == \"__all__\":\n database_names = [d for d in client.list_database_names()]\n for d in database_names:\n cat_path = os.path.join(os.getcwd(), d, \"extract\", \"tap-mongodb.catalog.json\")\n if os.path.exists(cat_path):\n os.remove(cat_path)\n\n\nif __name__ == \"__main__\":\n # generate_catalog()\n thismodule = sys.modules[__name__]\n argv = sys.argv\n if len(argv) > 1:\n func = getattr(thismodule, argv[1])\n if len(argv) > 2:\n func(*argv[2:])\n else:\n func()\n else:\n print(\"Please pass function in 
argv\")","repo_name":"aminuolawale/elt-pipeline","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42566984330","text":"import json\nfrom tqdm import tqdm\n\ndef replace_words():\n with open(\"data/arabic.json\", \"r\", encoding='utf8') as file:\n all_paragraphs = json.load(file)\n \n for i in tqdm(range(len(all_paragraphs))):\n all_paragraphs[i][\"paragraph\"] = all_paragraphs[i][\"paragraph\"].replace(\"تعديل - تعديل مصدري - تعديل ويكي بيانات\", \"\").strip()\n to_dump = json.dumps(all_paragraphs, indent=4, ensure_ascii=False)\n with open(\"data/arabic.json\", \"w\", encoding='utf8') as file:\n file.write(to_dump)\n\ndef remove_paragraphs(remove_idx):\n with open(\"data/arabic.json\", \"r\", encoding='utf8') as file:\n all_paragraphs = json.load(file)\n \n res = []\n for i in tqdm(range(len(all_paragraphs))):\n if i in remove_idx:\n continue\n res.append(all_paragraphs[i])\n\n # Dump the filtered list, not the original one\n to_dump = json.dumps(res, indent=4, ensure_ascii=False)\n with open(\"data/arabic_revised.json\", \"w\", encoding='utf8') as file:\n file.write(to_dump)\n\ndef merge_data(file_list, name):\n paragraphs_list = []\n for file in file_list:\n with open(file, \"r\", encoding='utf8') as file:\n data = json.load(file)\n print(len(data))\n paragraphs_list += data\n to_dump = json.dumps(paragraphs_list, indent=4, ensure_ascii=False)\n with open(name, \"w\", encoding='utf8') as file:\n file.write(to_dump)\n\ndef remove_duplicated():\n with open(\"data/arabic_revised.json\", \"r\", encoding='utf8') as file:\n data = json.load(file)\n with open(\"data/translated/train_arabic_raw.json\", \"r\", encoding='utf8') as file:\n translated = json.load(file)\n \n chk = set()\n duplicated = set()\n for i in range(7519):\n if data[i]['paragraph'] in chk:\n duplicated.add(i)\n else:\n chk.add(data[i]['paragraph'])\n \n res = []\n for i in tqdm(range(len(translated))):\n if i in duplicated:\n continue\n \n res.append(translated[i])\n if len(res) > 6400:\n if i < 6897:\n res[-1]['translator'] = \"facebook/mbart-large-50-many-to-one-mmt\"\n else:\n res[-1]['translator'] = \"Helsinki-NLP/opus-mt-ar-en\"\n print(len(res))\n print(len(translated))\n print(len(duplicated))\n to_dump = json.dumps(res, indent=4, ensure_ascii=False)\n with open(\"data/translated/train_arabic_raw.json\", \"w\", encoding='utf8') as file:\n file.write(to_dump)\n\ndef parsing_dataset():\n with open(\"data/translated/train_arabic_raw.json\", \"r\", encoding='utf8') as file:\n translated = json.load(file)\n\n # chk = set()\n # for i in range(len(translated)):\n # para_len = len(translated[i]['paragraph'].split())\n # if para_len > 300:\n # chk.add(i)\n \n # from transformers import AutoTokenizer\n # tokenizer = AutoTokenizer.from_pretrained(\"Helsinki-NLP/opus-mt-ar-en\")\n \n # from nltk import tokenize\n # sentences = tokenize.sent_tokenize(translated[0]['paragraph'])\n # for s in sentences:\n # print(len(s))\n # print(len(translated[0]['paragraph'].split()))\n \n train = []\n test = []\n i = 0\n for i in range(len(translated)):\n if len(translated[i]['paragraph'].split()) < 240:\n continue\n \n if len(train) == 6400:\n test.append(translated[i])\n if len(test) == 700:\n break\n continue\n train.append(translated[i])\n \n print(len(train))\n print(len(test))\n \n train_dump = json.dumps(train, indent=4, ensure_ascii=False)\n test_dump = json.dumps(test, indent=4, ensure_ascii=False)\n with 
open(\"data/translated/train_arabic.json\", \"w\", encoding='utf8') as file:\n file.write(train_dump)\n with open(\"data/translated/test_arabic.json\", \"w\", encoding='utf8') as file:\n file.write(test_dump)\n\ndef remove_test():\n with open(\"test_pos_raw.json\", \"r\", encoding='utf8') as file:\n translated = json.load(file)\n \n res = []\n chinese = {'mBART': [], 'helsinki': []}\n for item in translated:\n if item['language'] == 'chinese':\n chinese[item['translator']].append(item)\n else:\n res.append(item)\n \n res.extend(chinese['mBART'][:350])\n res.extend(chinese['helsinki'][:350])\n \n test_dump = json.dumps(res, indent=4, ensure_ascii=False)\n with open(\"test_pos.json\", \"w\", encoding='utf8') as file:\n file.write(test_dump)\n \n\n\nif __name__ == '__main__':\n # lang = ['arabic', 'chinese', 'indonesian', 'japanese']\n # train_files = [f'translated/from_{lang[i]}/train_{lang[i]}.json' for i in range(4)]\n # merge_data(train_files, 'train.json')\n # test_files = [f'translated/from_{lang[i]}/test_{lang[i]}.json' for i in range(4)]\n # merge_data(test_files, 'test.json')\n \n \n # merge_data(['train_pos1.json', 'train_pos2.json'], 'train_pos.json')\n remove_test()\n pass","repo_name":"whitejeep600/translation_language_detection","sub_path":"models/pos/data/postprocessing.py","file_name":"postprocessing.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42558438195","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\nimport utils.conf as conf\n\ndevice = conf.device\n\n\nclass GaussianNoise(nn.Module):\n \"\"\"\n Create Gaussian noise on the input with specified signal to noise ration snr.\n \"\"\"\n def __init__(self, snr):\n super(GaussianNoise, self).__init__()\n self.snr = snr\n\n def forward(self, y):\n std = torch.std(y, dim=1) * np.power(10.0, -self.snr / 20)\n noise = torch.normal(torch.zeros_like(y, device=device),\n std=(torch.zeros_like(y, device=device) + std.reshape(-1, 1)))\n return y + noise\n","repo_name":"feeds/na-alista","sub_path":"utils/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"19273055573","text":"from PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, Conv2DTranspose, UpSampling2D\nfrom keras.layers.normalization import BatchNormalization as BN\nimport keras\nimport cv2\nimport numpy as np\nfrom keras.optimizers import SGD\n\nopt = SGD(lr=1, momentum=0.9)\ndef Mask_Gen():\n model = Sequential()\n model.add(Conv2D(30,kernel_size = (5,5),\n strides=(1,1),padding='valid',activation='relu',input_shape=(374, 324, 3)))\n model.add(BN())\n model.add(Conv2D(30,kernel_size=(5,5),\n strides=1,activation='relu'))\n model.add(BN())\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='valid'))\n model.add(Conv2D(16,kernel_size=(3,3),\n strides=1,activation='relu'))\n model.add(BN())\n model.add(Conv2D(16,kernel_size=(3,3),\n strides=1,activation='relu'))\n model.add(BN())\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='valid'))\n model.add(Conv2D(16,kernel_size=(3,3),\n strides=1,activation='relu'))\n model.add(BN())\n model.add(Conv2D(16,kernel_size=(3,3),\n strides=1,activation='relu'))\n model.add(BN())\n 
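# final refinement: one more 3x3 conv with dropout, then a single-channel mask output\n 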
model.add(Conv2D(16,kernel_size=(3,3),\n strides=1,activation='relu'))\n model.add(Dropout(0.5))\n model.add(BN())\n model.add(Conv2D(1,kernel_size=(3,3),\n strides=1,activation='relu'))\n model.compile(loss='mse',\n optimizer='adam',\n metrics=['accuracy'])\n # model.summary()\n\n return model\n\nclassifier = Mask_Gen()\nclassifier.load_weights(\"model_pure_cnn_mse.h5\")\nprint('Loaded Weights')\n\n# Prediction for the example\nimg = Image.open('dataset/original/four_orig.jpeg')\nimg = cv2.resize(np.float32(img), dsize=(324, 374), interpolation=cv2.INTER_CUBIC)\n# If only one channel\nimg = cv2.merge((img,img,img))\n# print(img.shape)\nx = np.expand_dims(img, axis=0)\n# print(x.shape)\n\nx = np.expand_dims(img, axis=0)\n\nres = classifier.predict(x)\n# print(res.shape)\nres = np.squeeze(res, axis=0)\nres = np.squeeze(res, axis=-1)\n# print(res.shape)\n# res = cv2.GaussianBlur(res, (11, 11), 0)\n# res = cv2.threshold(res, 5, 255, cv2.THRESH_BINARY)[1]\n# 16,17 for four/one. 17,18 for three/two.\n\n# the area of the image with the largest intensity value\n(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(res)\n# print(maxLoc)\nx = maxLoc[0]\ny = maxLoc[1]\nval = int(res[y,x])\nprint(\"Threshold Pixel Value \" + str(val))\nlower_red = np.array([val])\nupper_red = np.array([val+1])\n# print(res[44,32])\nres = cv2.inRange(res, lower_red, upper_red)\nlabels = {1:'Forged',2:'Pristine'}\nplt.imshow(res.astype('uint8'))\ncbar = plt.colorbar()\ncbar.ax.set_yticklabels(['Pristine','','','','','Forged'])\ncbar.set_label('Forgery Index')\nplt.show()\n\nx_max = maxLoc[0] + 10\nx_min = maxLoc[0] - 10\ny_max = maxLoc[1] + 10\ny_min = maxLoc[1] - 10\n# print(x_max)\nobject = cv2.imread('dataset/original/four_orig.jpeg')\n# cv2.imshow('Orig', object)\nobject = cv2.resize(object, dsize=(81, 69), interpolation=cv2.INTER_CUBIC)\n# cv2.imshow('Resize', object)\ncv2.rectangle(object,(int(x_min),int(y_min)),(int(x_max),int(y_max)),(0,255,0),3)\n# object = cv2.resize(np.float32(object), dsize=(81, 69), interpolation=cv2.INTER_CUBIC)\n\ncv2.imshow('Box',object)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\ncv2.imwrite('Results/Dice/four_bound_box_dice.png',object)\n","repo_name":"vam-sin/Image-Forgery-Detection","sub_path":"src/CNN/run_pure_conv.py","file_name":"run_pure_conv.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"37178537725","text":"def subeNotas (porcentaje,x):\n notasBien = [] # Raises each mark by a percentage\n for i in x:\n i = i + i*porcentaje/100\n notasBien.append (i)\n return notasBien\n\ndef difSubida (notas, notasBien):\n difSubidas = [] # Computes the difference for each mark\n pos = 0\n for j in notasBien:\n difSubidas.append (notasBien[pos]-notas[pos])\n pos += 1\n return difSubidas\n\ndef mediaSubida (gg):\n num = len (gg) # Computes the average increase\n suma = sum (gg)\n manolo = suma/num\n return manolo\n\ndef sinSuspensos (asignaturas, notas, notasBien):\n posN = 0 # Removes the failing items\n for k in notasBien: # from the lists\n if k < 5:\n asignaturas.remove (asignaturas[posN])\n notas.remove (notas[posN])\n notasBien.remove (notasBien[posN])\n posN += 1\n\ndef boleton (asignaturas, notas, notasBien):\n boletin = [] #Prints the ONE and ONLY report card\n posB = 0\n for l in asignaturas:\n print (\"| \\t %s \\t | \\t %d \\t | \\t %.1f \\t |\" % (asignaturas[posB], notas[posB], notasBien[posB]))\n posB += 
1\n\n\n#...............................................................................\n\nnombre = \"Pablo de Cara Jiménez\"\nprint (nombre)\n\nasignaturas = [\"LM\", \"ED\", \"Prog\", \"SI\", \"BD\", \"FOL\"]\nnotas = [7,3,9,8,8,4]\n\nnotasBien = subeNotas (20, notas)\nprint (\"The corrected marks are:\")\nprint (notasBien)\n\ndifSubidas = difSubida (notas, notasBien)\n#print (difSubidas)\nmedia = mediaSubida (difSubidas)\n#print (\"The average mark increase is %.1f\" % media)\n\naprobadas = sinSuspensos (asignaturas, notas, notasBien)\n#print (\"The passed subjects are:\")\n#print (asignaturas)\nprint (\"\")\nprint (\"\\t \\t \\tReport card\")\nprint (\"\")\nboletin = boleton (asignaturas, notas, notasBien)\nprint (\"\")\nsalir = input (\"Press Enter to exit\") \t\t# This is just so the window doesn't close automatically\n","repo_name":"FranSV98/PYTHON","sub_path":"FUNCIONES/EXAMEN LISTAS PABLO.py","file_name":"EXAMEN LISTAS PABLO.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71034117352","text":"import re\nimport os\nimport json\n\n# Prints the table's headings\ndef print_table_headings(col_lengths):\n headings = ['', 'Word Types (terms)', 'Non-positional postings']\n sub_headings = ['Size of', 'dictionary', 'Non-positional index']\n sub_headings2 = ['Size', 'Δ', 'cml']\n col_index = 0\n\n print('|', end='')\n for heading in headings:\n print(' '*((col_lengths[col_index] - len(heading))//2) + heading + ' '*(((col_lengths[col_index] - len(heading))//2) + (col_lengths[col_index] - len(heading)) % 2) + '|', end='')\n col_index += 1\n print()\n\n\n col_index = 0\n\n print('|', end='')\n for sub_heading in sub_headings:\n print(' '*((col_lengths[col_index] - len(sub_heading))//2) + sub_heading + ' '*(((col_lengths[col_index] - len(sub_heading))//2) + (col_lengths[col_index] - len(sub_heading)) % 2) + '|', end='')\n col_index += 1\n print()\n\n\n col_index = 1\n num_of_columns = len(col_lengths)\n\n print('|' + ' '*(col_lengths[0]) + '|', end='')\n while col_index < num_of_columns:\n print(' '*((col_lengths[col_index] - len(sub_headings2[0]) - 8)//2) + sub_headings2[0] + ' '*(((col_lengths[col_index] - len(sub_headings2[0]) - 8)//2) + (col_lengths[col_index] - len(sub_headings2[0])) % 2), end='')\n print(' ' + sub_headings2[1] + ' ' + sub_headings2[2] + ' |', end='')\n col_index += 1\n print()\n\n\n# Prints the table's body\ndef print_table_body():\n for testName, testValues in compression_table.items():\n # Start of table line\n print('|', end='')\n\n # Test Name\n if 'test_name' in testValues:\n print(' ' + testValues['test_name'] + ' '*((col_lengths[0] - len(testValues['test_name']) - 1)) + '|', end='')\n\n else:\n print_empty_tableCell(col_lengths[0])\n\n # Dictionary totals\n if 'total_dictionary_size' in testValues:\n total_value = \"{:,}\".format(testValues['total_dictionary_size'])\n total_value_length = len(total_value)\n\n # Dictionary total\n print(' ' + total_value + ' '*(col_lengths[1] - total_value_length - 9), end='')\n\n\n if testName == 'unfiltered':\n print(' '*(8), end='')\n\n else:\n # Delta dictionary total\n if 'delta_dictionary' in testValues:\n delta_value = formatNumber(testValues['delta_dictionary'])\n print(delta_value + ' ', end='')\n\n else:\n print(' '*(4), end='')\n\n\n # Cml dictionary total\n if 'cml_dictionary' in testValues:\n cml_value = formatNumber(testValues['cml_dictionary'])\n print(cml_value + ' |', end='')\n\n else:\n print(' '*(4) + 
'|', end='')\n\n else:\n print_empty_tableCell(col_lengths[1])\n\n\n # Postings totals\n if 'total_postings' in testValues:\n total_value = \"{:,}\".format(testValues['total_postings'])\n total_value_length = len(total_value)\n\n \n # Postings total\n print(' ' + total_value + ' '*(col_lengths[2] - total_value_length - 9), end='')\n\n\n if testName == 'unfiltered':\n print(' '*(8), end='')\n\n else:\n # Delta postings total\n if 'delta_postings' in testValues:\n delta_value = formatNumber(testValues['delta_postings'])\n print(delta_value + ' ', end='')\n\n else:\n print(' '*(4), end='')\n\n\n # Cml postings total\n if 'cml_postings' in testValues:\n cml_value = formatNumber(testValues['cml_postings'])\n print(cml_value + ' |', end='')\n\n else:\n print(' '*(4) + '|', end='')\n\n else:\n print_empty_tableCell(col_lengths[2])\n\n # end of table line\n print()\n\n\n# Prints an empty table cell\ndef print_empty_tableCell(length):\n print(' '*(length) + '|', end='')\n\n\n# Prints an empty table line\ndef print_empty_tableLine(col_lengths):\n col_index = 0\n num_of_columns = len(col_lengths)\n\n print('|', end='')\n while col_index < num_of_columns:\n print(' '*(col_lengths[col_index]) + '|', end='')\n col_index += 1\n print()\n\n\n# Prints a horizontal line (for separation)\ndef print_horizontal_tableLine(col_lengths, isTop=False):\n col_index = 0\n num_of_columns = len(col_lengths)\n col_dividor_symbol = '|'\n\n if isTop:\n col_dividor_symbol = '_'\n\n print(col_dividor_symbol, end='')\n while col_index < num_of_columns:\n print('_'*(col_lengths[col_index]) + col_dividor_symbol, end='')\n col_index += 1\n print()\n\n\n# Formats percentages of compression\ndef formatNumber(number):\n returnedValue = ''\n\n if abs(number) < 10:\n returnedValue = ' -' + str(abs(number))\n else:\n returnedValue = '-' + str(abs(number))\n\n return returnedValue\n\n\n# Gets the length of the dictionary\ndef getTotal_dictionary(SPIMI_dictionary):\n return len(SPIMI_dictionary)\n\n\n# Gets the length of all the postings lists. 
It adds all the documents frequencies.\ndef getTotal_postings(SPIMI_dictionary):\n totalPostings = 0\n\n for term in SPIMI_dictionary:\n totalPostings += SPIMI_dictionary[term]['df']\n\n return totalPostings\n\n\n# It calculates the cumulative compression change\ndef getCumulative(currentTestValue, prevTestValue):\n return int(round(((prevTestValue - currentTestValue) / prevTestValue) * 100))\n\n\n# Removes numbers from the inverted index\ndef noNumbers(unfiltered_SPIMI_dictionary):\n numberPattern = re.compile(\"[0-9]+\")\n noNumbers_dictionary = {}\n\n for term in unfiltered_SPIMI_dictionary:\n if not numberPattern.match(term):\n noNumbers_dictionary[term] = unfiltered_SPIMI_dictionary[term]\n\n return noNumbers_dictionary\n\n\n# Case folds the inverted index\ndef caseFolding(noNumbers_dictionary):\n caseFolding_dictionary = {}\n\n for term in noNumbers_dictionary:\n lowercase_term = term.lower()\n\n if lowercase_term in caseFolding_dictionary:\n new_posting_list = []\n new_posting_list = list(set(caseFolding_dictionary[lowercase_term]['docs']) | set(noNumbers_dictionary[term]['docs']))\n new_posting_list = sorted(new_posting_list)\n caseFolding_dictionary[lowercase_term]['df'] = len(new_posting_list)\n caseFolding_dictionary[lowercase_term]['docs'] = new_posting_list\n else:\n caseFolding_dictionary[lowercase_term] = noNumbers_dictionary[term]\n\n return caseFolding_dictionary\n\n\n# Removes the stop words from the inverted index\ndef stopWords_removal(caseFolding_dictionary, stopWords_list):\n stopWords_dictionary = {}\n\n for term in caseFolding_dictionary:\n if term not in stopWords_list:\n stopWords_dictionary[term] = caseFolding_dictionary[term]\n\n return stopWords_dictionary\n\n\n# Removes 30 stop words\ndef stopWords_30(caseFolding_dictionary):\n # Stop words taken from the slides. 
Chapter 2 (32/62)\n stopWords_30_list = [\"a\", \"an\", \"and\", \"are\", \"as\", \"at\", \"be\", \"by\", \"for\", \"from\", \"has\", \"he\", \"his\", \"i\", \"in\",\n \"is\", \"it\", \"its\", \"of\", \"on\", \"she\", \"that\", \"the\", \"to\", \"was\", \"we\", \"were\", \"will\", \"with\", \"you\"]\n\n return stopWords_removal(caseFolding_dictionary, stopWords_30_list)\n\n\n# Removes 150 stop words\ndef stopWords_150(caseFolding_dictionary):\n # Stop words taken from https://www.ranks.nl/stopwords\n stopWords_150_list = [\"a\", \"about\", \"above\", \"after\", \"again\", \"against\", \"all\", \"almost\", \"also\", \"always\", \"am\",\n \"an\", \"and\", \"any\", \"anyone\", \"are\", \"around\", \"as\", \"at\", \"be\", \"because\", \"been\", \"before\", \"being\", \"below\",\n \"between\", \"both\", \"but\", \"by\", \"can't\", \"could\", \"did\", \"didn't\", \"do\", \"does\", \"doesn't\", \"doing\", \"don't\", \"down\",\n \"during\", \"each\", \"few\", \"for\", \"from\", \"further\", \"had\", \"has\", \"have\", \"having\", \"he\", \"he's\", \"her\", \"here\",\n \"hers\", \"herself\", \"him\", \"himself\", \"his\", \"how\", \"i\", \"i'd\", \"i'll\", \"i'm\", \"i've\", \"if\", \"in\", \"into\", \"is\",\n \"isn't\", \"it\", \"it's\", \"its\", \"itself\", \"let's\", \"me\", \"more\", \"most\", \"mostly\", \"much\", \"my\", \"myself\", \"never\",\n \"no\", \"nor\", \"not\", \"of\", \"off\", \"on\", \"once\", \"only\", \"or\", \"other\", \"ought\", \"our\", \"out\", \"over\", \"own\", \"same\",\n \"she\", \"should\", \"so\", \"some\", \"someone\", \"something\", \"such\", \"than\", \"that\", \"that's\", \"the\", \"their\", \"them\",\n \"then\", \"there\", \"there's\", \"these\", \"they\", \"they're\", \"they've\", \"this\", \"those\", \"through\", \"to\", \"together\",\n \"too\", \"under\", \"until\", \"up\", \"us\", \"very\", \"was\", \"wasn't\", \"we\", \"we'd\", \"we're\", \"we've\", \"well\", \"were\", \"what\",\n \"what's\", \"when\",\"where\", \"which\", \"while\", \"who\", \"why\", \"will\", \"with\", \"would\", \"you\", \"yourself\"]\n\n return stopWords_removal(caseFolding_dictionary, stopWords_150_list)\n\n\n# Populates the table data\ndef populate_compressionTable_data(test_name, total_dictionary_size, delta_dictionary, cml_dictionary, total_postings, delta_postings, cml_postings):\n compression_table[test_name]['total_dictionary_size'] = total_dictionary_size\n compression_table[test_name]['delta_dictionary'] = delta_dictionary\n compression_table[test_name]['cml_dictionary'] = cml_dictionary\n compression_table[test_name]['total_postings'] = total_postings\n compression_table[test_name]['delta_postings'] = delta_postings\n compression_table[test_name]['cml_postings'] = cml_postings\n\n\n# Prints the table\ndef print_compression_table():\n print_horizontal_tableLine(col_lengths, True)\n print_empty_tableLine(col_lengths)\n print_table_headings(col_lengths)\n print_horizontal_tableLine(col_lengths)\n print_table_body()\n print_horizontal_tableLine(col_lengths)\n\n\n# Execution starts here\ncompression_table = {\n 'unfiltered': {'test_name': 'Unfiltered'},\n 'noNumbers': {'test_name': 'No numbers'},\n 'caseFolding': {'test_name': 'Case folding'},\n 'stopWords_30': {'test_name': '30 Stop words'},\n 'stopWords_150': {'test_name': '150 Stop words'}\n}\n\n# Column widths\ncol_lengths = [16, 20, 25]\n\nSPIMI_file_name = 'SPIMI/Unfiltered_SPIMI/unfiltered_SPIMI_dictionary.txt'\n\nif os.path.isfile(SPIMI_file_name):\n with open(SPIMI_file_name, 'r') as SPIMI_file:\n file_content = 
SPIMI_file.read()\n unfiltered_SPIMI_dictionary = json.loads(file_content)\n\n unfiltered_totalDictionary = getTotal_dictionary(unfiltered_SPIMI_dictionary)\n unfiltered_totalPostings = getTotal_postings(unfiltered_SPIMI_dictionary)\n\n populate_compressionTable_data('unfiltered', unfiltered_totalDictionary, 0, 0, unfiltered_totalPostings, 0, 0)\n\n\n # No numbers\n noNumbers_dictionary = noNumbers(unfiltered_SPIMI_dictionary)\n\n noNumbers_totalDictionary = getTotal_dictionary(noNumbers_dictionary)\n noNumbers_cml_dictionary = getCumulative(noNumbers_totalDictionary, compression_table['unfiltered']['total_dictionary_size'])\n noNumbers_delta_dictionary = noNumbers_cml_dictionary - compression_table['unfiltered']['cml_dictionary']\n\n noNumbers_totalPostings = getTotal_postings(noNumbers_dictionary)\n noNumbers_cml_postings = getCumulative(noNumbers_totalPostings, compression_table['unfiltered']['total_postings'])\n noNumbers_delta_postings = noNumbers_cml_postings - compression_table['unfiltered']['cml_postings']\n\n populate_compressionTable_data('noNumbers', noNumbers_totalDictionary, noNumbers_delta_dictionary, noNumbers_cml_dictionary, noNumbers_totalPostings, noNumbers_delta_postings, noNumbers_cml_postings)\n\n\n # Case folding\n caseFolding_dictionary = caseFolding(noNumbers_dictionary)\n\n caseFolding_totalDictionary = getTotal_dictionary(caseFolding_dictionary)\n caseFolding_cml_dictionary = getCumulative(caseFolding_totalDictionary, compression_table['unfiltered']['total_dictionary_size'])\n caseFolding_delta_dictionary = caseFolding_cml_dictionary - compression_table['noNumbers']['cml_dictionary']\n\n caseFolding_totalPostings = getTotal_postings(caseFolding_dictionary)\n caseFolding_cml_postings = getCumulative(caseFolding_totalPostings, compression_table['unfiltered']['total_postings'])\n caseFolding_delta_postings = caseFolding_cml_postings - compression_table['noNumbers']['cml_postings']\n\n populate_compressionTable_data('caseFolding', caseFolding_totalDictionary, caseFolding_delta_dictionary, caseFolding_cml_dictionary, caseFolding_totalPostings, caseFolding_delta_postings, caseFolding_cml_postings)\n\n\n # 30 Stop words\n stopWords_30_dictionary = stopWords_30(caseFolding_dictionary)\n\n stopWords_30_totalDictionary = getTotal_dictionary(stopWords_30_dictionary)\n stopWords_30_cml_dictionary = getCumulative(stopWords_30_totalDictionary, compression_table['unfiltered']['total_dictionary_size'])\n stopWords_30_delta_dictionary = stopWords_30_cml_dictionary - compression_table['caseFolding']['cml_dictionary']\n\n stopWords_30_totalPostings = getTotal_postings(stopWords_30_dictionary)\n stopWords_30_cml_postings = getCumulative(stopWords_30_totalPostings, compression_table['unfiltered']['total_postings'])\n stopWords_30_delta_postings = stopWords_30_cml_postings - compression_table['caseFolding']['cml_postings']\n\n populate_compressionTable_data('stopWords_30', stopWords_30_totalDictionary, stopWords_30_delta_dictionary, stopWords_30_cml_dictionary, stopWords_30_totalPostings, stopWords_30_delta_postings, stopWords_30_cml_postings)\n\n\n # 150 Stop words\n stopWords_150_dictionary = stopWords_150(caseFolding_dictionary)\n\n stopWords_150_totalDictionary = getTotal_dictionary(stopWords_150_dictionary)\n stopWords_150_cml_dictionary = getCumulative(stopWords_150_totalDictionary, compression_table['unfiltered']['total_dictionary_size'])\n stopWords_150_delta_dictionary = stopWords_150_cml_dictionary - compression_table['caseFolding']['cml_dictionary']\n\n 
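# postings side: same totals plus cumulative/delta changes, measured against the unfiltered baseline\n 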
stopWords_150_totalPostings = getTotal_postings(stopWords_150_dictionary)\n stopWords_150_cml_postings = getCumulative(stopWords_150_totalPostings, compression_table['unfiltered']['total_postings'])\n stopWords_150_delta_postings = stopWords_150_cml_postings - compression_table['caseFolding']['cml_postings']\n\n populate_compressionTable_data('stopWords_150', stopWords_150_totalDictionary, stopWords_150_delta_dictionary, stopWords_150_cml_dictionary, stopWords_150_totalPostings, stopWords_150_delta_postings, stopWords_150_cml_postings)\n\n\n\n print_compression_table()\n print()\n\nelse:\n print(\"The unfiltered SPIMI dictionary file was not found. Please run the 'SPIMI.py' file first.\")\n print(\"Remember to uncomment the unfiltered lines on the 'Tokenizer.py' and 'SPIMI.py'.\")\n","repo_name":"andavazgar/IR_SentimentRanking","sub_path":"Compression_Table.py","file_name":"Compression_Table.py","file_ext":"py","file_size_in_byte":14925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10331764672","text":"#!/usr/bin/env python2\n#-*-coding:utf-8-*-\n\"\"\"\nCreated on Fri Mar 20 15:39:30 2019\n\n@author: lewisliu\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport math\nimport os\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import spline\nimport imageQuality\nfrom tqdm import tqdm\nimport sys\ndefaultencoding = 'utf-8'\nif sys.getdefaultencoding() != defaultencoding:\n reload(sys)\n sys.setdefaultencoding(defaultencoding)\n\n# # 支持中文\n# plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\n# plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\n\nplt.figure(figsize=(10,6))\n\nours_imgs_folder_paths = [\n \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/1_0\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/1_1\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/1_2\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/1_3\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/2_0\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/2_1\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/2_2\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/2_3\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/3_0\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/3_1\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/3_2\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOurs/3_3\"\n]\nothers_imgs_folder_paths = [\n \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOthers/A\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOthers/B\",\n # \"/home/lewisliu/Clobotics/WindPower/ImageQuality/Data/IQVS01/FromOthers/C\"\n]\n\n#* list all images in the folder\ndef list_images_in_one_folder(folder_path):\n pics_path = []\n for i in os.walk(folder_path):\n for j in i[2]:\n if 'JPG' in j or 'jpg' in j:\n pic_path = os.path.join(i[0],j)\n pics_path.append(pic_path)\n \n return pics_path\n\n#* define calculate one image's image quality value\ndef cal_img_qaulity(img):\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n img_q = imageQuality.Laplacian(img_gray)\n \n return img_q\n\n#* calculate all images' q and hist\ndef cal_all_images_q_hist(folder_paths):\n img_q_sum = 0\n img_num = 0\n img_q_array = 
[]\n for cur_folder_path in tqdm(folder_paths):\n pics_path = list_images_in_one_folder(cur_folder_path)\n for pic_path in tqdm(pics_path):\n img_num += 1\n img_data = cv2.imread(pic_path)\n img_q = cal_img_qaulity(img_data)\n img_q_sum += img_q\n img_q_array.append(img_q)\n\n img_q_average = 0\n if img_num > 0:\n img_q_average = img_q_sum / img_num\n \n print(\"img_q_average: {}\".format(img_q_average))\n print(\"min_q: {}, max_q: {}\".format(min(img_q_array), max(img_q_array)))\n print(\"img_sum: {}\".format(img_num))\n # print(img_q_array)\n #* calculate the distribution\n scale = 10.0\n # X = np.linspace(0, 100, 100)\n X = np.arange(0, 100, 1)\n Y = np.zeros(100)\n for cur_img_q in img_q_array:\n y = int(cur_img_q)\n if y >= len(Y):\n print(\"Wrong! cur_y: {} is out the range of array {}\".format(y, len(Y)))\n else:\n Y[y] += 1\n for index in range(len(Y)):\n Y[index] /= img_num\n X = np.array(X)\n Y = np.array(Y)\n \n return img_q_average, img_num, X, Y\n\nif __name__ == \"__main__\":\n #* calculate the average laplacian value for ours images\n our_imgs_ave_q, our_imgs_sum, our_X, our_Y = cal_all_images_q_hist(ours_imgs_folder_paths)\n print(\"our_imgs_ave_q: {}\".format(our_imgs_ave_q))\n print(\"our_imgs_sum: {}\".format(our_imgs_sum))\n\n #* calculate the average laplacian value for others images\n other_imgs_ave_q, other_imgs_sum, other_X, other_Y = cal_all_images_q_hist(others_imgs_folder_paths)\n print(\"other_imgs_ave_q: {}\".format(other_imgs_ave_q))\n print(\"other_imgs_sum: {}\".format(other_imgs_sum))\n\n # plt.plot(our_X, our_Y, linestyle='-', color ='b', label=\"Clobotics Images\")\n plt.plot(our_X, our_Y, linestyle='-', color ='b', label=\"Dataset A: Clobotics Images Qaulity Distribution Curve\") # Qaulity Distribution Curve\n plt.plot(other_X, other_Y,linestyle='-', color = 'r', label=\"Dataset B: Third-part Images Qaulity Distribution Curve\")\n # plt.title(u'图集图像清晰度分布图')\n plt.xlabel('Image Quality Laplacian Value')\n plt.ylabel('Images Quality Probability')\n plt.title(\"Images Quality Distribution Graph\")\n plt.grid(alpha=0.2)\n plt.legend()\n plt.savefig(\"g.png\")\n plt.show()\n\n\n # xnew = np.linspace(0,100,1000) #300 represents numbof points to make between T.min and T.max\n # power_smooth = spline(X,Y,xnew)\n # plt.plot(xnew,power_smooth)\n # plt.show()\n\n# # plt.savefig(\"g.png\")\n\n# if __name__ == \"__main__\":\n# T = np.array([6, 7, 8, 9, 10, 11, 12])\n# power = np.array([1.53E+03, 5.92E+02, 2.04E+02, 7.24E+01, 2.72E+01, 1.10E+01, 4.70E+00])\n# plt.plot(T,power)\n# plt.show()\n","repo_name":"starLewis/computer_vision_pub","sub_path":"35ImageQaulity/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15452244231","text":"from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport base64\nimport os\nimport tempfile\n\nimport requests\n\nfrom appr.commands.command_base import CommandBase, PackageSplit\nfrom appr.formats.helm.manifest_chart import ManifestChart\nfrom appr.formats.appr.manifest_jsonnet import ManifestJsonnet\nfrom appr.formats.utils import detect_format\nfrom appr.pack import pack_kub\nfrom appr.utils import package_filename\n\n\nclass PushCmd(CommandBase):\n name = 'push'\n help_message = \"push a package to the registry\"\n default_media_type = None\n\n def __init__(self, options):\n super(PushCmd, self).__init__(options)\n self.registry_host = options.registry_host\n self.force 
= options.force\n self.manifest = None\n self.media_type = options.media_type\n if options.media_type is self.default_media_type:\n self.media_type = os.getenv(\"APPR_DEFAULT_MEDIA_TYPE\", self.default_media_type)\n\n self.channel = options.channel\n self.version = options.version\n self.filter_files = True\n self.metadata = None\n self.prefix = None\n self.manifest_name = None\n self.package_name = options.package\n self.package_parts = options.package_parts\n self.pname = self.package_parts.get('package', None)\n self.namespace = self.package_parts.get('namespace', None)\n\n if self.namespace is None:\n self.namespace = options.ns\n\n self.version_parts = options.version_parts\n\n if self.version == \"default\":\n self.version = None\n\n self.ssl_verify = options.cacert or not options.insecure\n self.status = ''\n\n @classmethod\n def _add_arguments(cls, parser):\n cls._add_registryhost_option(parser)\n cls._add_mediatype_option(parser, cls.default_media_type, required=False)\n cls._add_packageversion_option(parser)\n parser.add_argument(\"--ns\", \"--namespace\", default=None, help=argparse.SUPPRESS)\n parser.add_argument(\"-f\", \"--force\", action='store_true', default=False, help=\"force push\")\n parser.add_argument(\"-c\", \"--channel\", default=None, help=\"Set a channel\")\n parser.add_argument(\"--version-parts\", default={}, help=argparse.SUPPRESS)\n parser.add_argument(\"--package-parts\", default={}, help=argparse.SUPPRESS)\n parser.add_argument('package', nargs='?', default=None, action=PackageSplit,\n help=\"repository dest\")\n\n def _push(self):\n client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)\n filename = package_filename(self.package_name, self.version, self.media_type)\n kubepath = os.path.join(tempfile.gettempdir(), filename + \".tar.gz\")\n pack_kub(kubepath, filter_files=self.filter_files, prefix=self.prefix)\n kubefile = open(kubepath, 'rb')\n body = {\n \"manifest_name\": self.manifest_name,\n \"name\": self.package_name,\n \"release\": self.version,\n \"metadata\": self.metadata,\n \"media_type\": self.media_type,\n \"blob\": base64.b64encode(kubefile.read())}\n try:\n client.push(self.package_name, body, self.force)\n self.status = \"package: %s (%s | %s) pushed\\n\" % (self.package_name, self.version,\n self.media_type)\n except requests.exceptions.RequestException as exc:\n if not (self.channel and exc.response.status_code in [409, 400]):\n raise\n kubefile.close()\n os.remove(kubepath)\n if self.channel:\n client.create_channel_release(self.package_name, self.channel, self.version)\n self.status += \">>> Release '%s' added to '%s'\" % (self.version, self.channel)\n\n def _chart(self):\n self.manifest = ManifestChart()\n self.manifest_name = self.manifest.name\n if self.pname is None:\n self.pname = self.manifest.name\n self.prefix = self.pname\n self.filter_files = False\n if self.namespace is None:\n raise argparse.ArgumentTypeError(\"Missing option: --namespace\")\n self.package_name = \"%s/%s\" % (self.namespace, self.pname)\n if self.version is None:\n self.version = self.manifest.version\n self.metadata = self.manifest.metadata()\n\n def _all_formats(self):\n self.filter_files = False\n if self.version is None or self.version == \"default\":\n raise argparse.ArgumentTypeError(\"Missing option: --version\")\n if self.package_name is None:\n raise argparse.ArgumentTypeError(\"Missing option: --name\")\n self.namespace, self.pname = self.package_name.split(\"/\")\n\n def _kpm(self):\n self.filter_files = False\n 
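# jsonnet manifests carry their own name and version, so fall back to them when flags are absent\n 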
self.manifest = ManifestJsonnet()\n ns, name = self.manifest.package['name'].split(\"/\")\n if not self.namespace:\n self.namespace = ns\n if not self.pname:\n self.pname = name\n self.package_name = \"%s/%s\" % (self.namespace, self.pname)\n if not self.version or self.version == \"default\":\n self.version = self.manifest.package['version']\n self.metadata = self.manifest.metadata()\n\n def _init(self):\n if self.media_type is None:\n self.media_type = detect_format(\".\").media_type\n if self.media_type in [\"kpm\", \"kpm-compose\"]:\n self._kpm()\n elif self.media_type in ['helm', 'chart']:\n self._chart()\n else:\n self._all_formats()\n\n def _call(self):\n self._init()\n self._push()\n\n def _render_dict(self):\n return {\n \"manifest_name\": self.manifest_name,\n \"package\": self.package_name,\n \"version\": self.version,\n \"media_type\": self.media_type,\n \"channel\": self.channel}\n\n def _render_console(self):\n return self.status\n","repo_name":"hangyan/appr","sub_path":"appr/commands/push.py","file_name":"push.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23658447084","text":"#!/usr/bin/env python\n# Python Network Programming Cookbook, Second Edition -- Chapter - 9\n# This program is optimized for Python 2.7.12.\n# It may run on any other version with/without modifications.\n\nimport argparse\nfrom mininet.net import Mininet\nfrom mininet.topolib import TreeTopo\n\n# Emulate a network with depth of depth_ and fanout of fanout_\ndef emulate(depth_, fanout_):\n \n # Create a network with tree topology\n tree_ = TreeTopo(depth=depth_,fanout=fanout_)\n \n # Initiating the Mininet instance\n net = Mininet(topo=tree_)\n \n # Start Execution of the Emulated System.\n net.start()\n\n # Name two of the instances as h1 and h2.\n h1, h2 = net.hosts[0], net.hosts[depth_]\n\n # Ping from an instance to another, and print the output.\n print (h1.cmd('ping -c1 %s' % h2.IP()))\n\n # Stop the Mininet Emulation.\n net.stop()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Mininet Simple Emulation')\n parser.add_argument('--depth', action=\"store\", dest=\"depth\", type=int, required=True)\n parser.add_argument('--fanout', action=\"store\", dest=\"fanout\", type=int, required=True)\n given_args = parser.parse_args()\n emulate(given_args.depth, given_args.fanout)\n\n","repo_name":"PacktPublishing/Python-Network-Programming-Cookbook-Second-Edition","sub_path":"Chapter09/9_2_mininet_emulation.py","file_name":"9_2_mininet_emulation.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"72"} +{"seq_id":"72215411752","text":"from os import path as osp\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.parallel import DataContainer as DC\nfrom mmcv.runner import auto_fp16\n\nfrom mmdet3d.core import show_seg_result\nfrom mmseg.models.segmentors import BaseSegmentor\n\n\nclass Base3DSegmentor(BaseSegmentor):\n \"\"\"Base class for 3D segmentors.\n\n The main difference with `BaseSegmentor` is that we modify the keys in\n data_dict and use a 3D seg specific visualization function.\n \"\"\"\n\n @property\n def with_regularization_loss(self):\n \"\"\"bool: whether the segmentor has regularization loss for weight\"\"\"\n return hasattr(self, 'loss_regularization') and \\\n self.loss_regularization is not None\n\n def forward_test(self, points, img_metas, **kwargs):\n 
\"\"\"Calls either simple_test or aug_test depending on the length of\n outer list of points. If len(points) == 1, call simple_test. Otherwise\n call aug_test to aggregate the test results by e.g. voting.\n\n Args:\n points (list[list[torch.Tensor]]): the outer list indicates\n test-time augmentations and inner torch.Tensor should have a\n shape BXNxC, which contains all points in the batch.\n img_metas (list[list[dict]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) and the inner list indicates\n images in a batch.\n \"\"\"\n for var, name in [(points, 'points'), (img_metas, 'img_metas')]:\n if not isinstance(var, list):\n raise TypeError(f'{name} must be a list, but got {type(var)}')\n\n num_augs = len(points)\n if num_augs != len(img_metas):\n raise ValueError(f'num of augmentations ({len(points)}) != '\n f'num of image meta ({len(img_metas)})')\n\n if num_augs == 1:\n return self.simple_test(points[0], img_metas[0], **kwargs)\n else:\n return self.aug_test(points, img_metas, **kwargs)\n\n @auto_fp16(apply_to=('points'))\n def forward(self, return_loss=True, **kwargs):\n \"\"\"Calls either forward_train or forward_test depending on whether\n return_loss=True.\n\n Note this setting will change the expected inputs. When\n `return_loss=True`, point and img_metas are single-nested (i.e.\n torch.Tensor and list[dict]), and when `resturn_loss=False`, point and\n img_metas should be double nested (i.e. list[torch.Tensor],\n list[list[dict]]), with the outer list indicating test time\n augmentations.\n \"\"\"\n if return_loss:\n return self.forward_train(**kwargs)\n else:\n return self.forward_test(**kwargs)\n\n def show_results(self,\n data,\n result,\n palette=None,\n out_dir=None,\n ignore_index=None,\n show=False,\n score_thr=None):\n \"\"\"Results visualization.\n\n Args:\n data (list[dict]): Input points and the information of the sample.\n result (list[dict]): Prediction results.\n palette (list[list[int]]] | np.ndarray): The palette of\n segmentation map. If None is given, random palette will be\n generated. Default: None\n out_dir (str): Output directory of visualization result.\n ignore_index (int, optional): The label index to be ignored, e.g.\n unannotated points. 
If None is given, set to len(self.CLASSES).\n Defaults to None.\n show (bool, optional): Determines whether you are\n going to show result by open3d.\n Defaults to False.\n TODO: implement score_thr of Base3DSegmentor.\n score_thr (float, optional): Score threshold of bounding boxes.\n Default to None.\n Not implemented yet, but it is here for unification.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n if palette is None:\n if self.PALETTE is None:\n palette = np.random.randint(\n 0, 255, size=(len(self.CLASSES), 3))\n else:\n palette = self.PALETTE\n palette = np.array(palette)\n for batch_id in range(len(result)):\n if isinstance(data['points'][0], DC):\n points = data['points'][0]._data[0][batch_id].numpy()\n elif mmcv.is_list_of(data['points'][0], torch.Tensor):\n points = data['points'][0][batch_id]\n else:\n ValueError(f\"Unsupported data type {type(data['points'][0])} \"\n f'for visualization!')\n if isinstance(data['img_metas'][0], DC):\n pts_filename = data['img_metas'][0]._data[0][batch_id][\n 'pts_filename']\n elif mmcv.is_list_of(data['img_metas'][0], dict):\n pts_filename = data['img_metas'][0][batch_id]['pts_filename']\n else:\n ValueError(\n f\"Unsupported data type {type(data['img_metas'][0])} \"\n f'for visualization!')\n file_name = osp.split(pts_filename)[-1].split('.')[0]\n\n pred_sem_mask = result[batch_id]['semantic_mask'].cpu().numpy()\n\n show_seg_result(\n points,\n None,\n pred_sem_mask,\n out_dir,\n file_name,\n palette,\n ignore_index,\n show=show)\n","repo_name":"HuangJunJie2017/BEVDet","sub_path":"mmdet3d/models/segmentors/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","stars":1058,"dataset":"github-code","pt":"72"} +{"seq_id":"22839645876","text":"from collections import defaultdict\ndef solution(clothes):\n answer = 1\n cloth_dict = defaultdict(int)\n for cloth in clothes:\n cloth_dict[cloth[1]] += 1\n print(cloth_dict)\n for v in cloth_dict.values():\n answer *= v+1\n return answer-1\n\nclothes = [[\"yellow_hat\", \"headgear\"], [\"blue_sunglasses\", \"eyewear\"], [\"green_turban\", \"headgear\"]]\n\nprint(solution(clothes))","repo_name":"code-enig/Python","sub_path":"분류/해시/위장.py","file_name":"위장.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42042178600","text":"import turtle\nt=turtle.Turtle()\nt.shape('turtle')\ns=turtle.textinput(\"\",\"이름을 입력하세요:\")\nt.write(\"안녕하세요?\"+s+\"씨, 다현 인사드립니다.\")\nwn=turtle.Screen()\nwn.bgcolor(\"black\")\nt.begin_fill()\nt.color(\"red\")\ns=turtle.textinput(\"\",\"이름을 입력하세요:\")\nt.write(\"안녕하세요?\"+s+\"씨, 다현 인사드립니다.\")\nt.left(45)\nt.forward(200)\nt.circle(73,211.37)\nt.left(180)\nt.circle(73,211.37)\nt.forward(200)\nt.end_fill()\n","repo_name":"kimdahyun0402/dao","sub_path":"파이썬/하트.py","file_name":"하트.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41790806163","text":"class Solution:\n def longestValidParentheses(self, s: str) -> int:\n stack = [-1]\n r = 0\n for i, c in enumerate(s):\n if c == '(':\n stack.append(i)\n else:\n del stack[-1]\n if stack:\n l = i-stack[-1]\n r = max(l, r)\n else:\n stack.append(i)\n return r\n\n\ns = Solution()\na = 
\")()())()()(\"\nprint(s.longestValidParentheses(a))\n","repo_name":"susurrant/LeetCode","sub_path":"32.py","file_name":"32.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33693742074","text":"from spea.SPEA import spea\nfrom lector import Y_TRUE_KROAB, mab1, mab2,mac1, mac2, spea_f_builder, Y_TRUE_KROAC\nimport matplotlib.pyplot as plt\nfrom mas.MAS import calcular_metricas\nfrom metricas import m1,m2,m3,error\nfrom mas.MAS import run_simulation\n\n\n\n\ndef run_spea(f, generacion, poblacion,simulaciones,y_true):\n M1 = 0\n M2 = 0\n M3 = 0\n ERROR = 0\n super_pareto = []\n for i in range(simulaciones):\n pareto_set = spea(poblacion, generacion, f)\n pareto_set = [{\"f1\": org[0],\"f2\": org[1]} for org in map(f, pareto_set)]\n super_pareto = super_pareto + pareto_set\n return super_pareto\n\ndef run_mas():\n super_pareto = []\n for i in range(1):\n pareto = run_simulation(20,100,'./datasets/tsp_kroab100.tsp.txt')\n super_pareto = super_pareto + pareto\n return super_pareto\nf = spea_f_builder(mab1, mab2)\npareto_spea = run_spea(f,100,100,1,Y_TRUE_KROAB)\npareto_mas = run_mas()\nplt.plot([x['f1'] for x in pareto_spea],[y['f2'] for y in pareto_spea],'o', color='green')\nplt.plot([x['f1'] for x in pareto_mas],[y['f2'] for y in pareto_mas],'o', color='blue')\nplt.plot([x['f1'] for x in Y_TRUE_KROAB],[y['f2'] for y in Y_TRUE_KROAB],'o', color='red')\nplt.show()\n","repo_name":"fascmax/tsp","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34002266431","text":"#!/usr/bin/env python3\n# encoding=utf8\n\nimport argparse\nimport sys\nfrom bs4 import BeautifulSoup\nfrom widget import *\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--input', help='Input filepath.')\nparser.add_argument('-o', '--output', help='Output filepath. 
')\nargs = parser.parse_args()\n\nif args.input:\n src = open(args.input, 'r')\nelse:\n src = sys.stdin\n\nif args.output:\n dst = open(args.output, 'w')\nelse:\n dst = sys.stdout\n\ndata = src.read()\nsoup = BeautifulSoup(data, 'xml')\n\nroot = soup.findChild()\n\nv = create_view(root)\nprint(str(v))\n","repo_name":"wiiiky/layoutcode","sub_path":"layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"71391510952","text":"def check(board) :\n whiteStartBoard = ['W','B','W','B','W','B','W','B']\n blackStartBoard = ['B','W','B','W','B','W','B','W']\n count = [0,0]\n for i in range(8) :\n for j in range(8) :\n if i % 2 == 0 and board[i][j] != whiteStartBoard[j] :\n count[0] += 1\n elif i % 2 == 1 and board[i][j] != blackStartBoard[j] :\n count[0] += 1\n for j in range(8) :\n if i % 2 == 1 and board[i][j] != whiteStartBoard[j] :\n count[1] += 1\n elif i % 2 == 0 and board[i][j] != blackStartBoard[j] :\n count[1] += 1\n\n\n \n\n return min(count)\n\n\nn,m = map(int,input().split())\nboard = [input() for _ in range(n)]\nans = 1000\nfor i in range(0,n-7) :\n for j in range(0,m-7) :\n boardY = []\n for x in range(i,i+8) :\n boardX = []\n for y in range(j,j+8):\n \n boardX.append(board[x][y])\n boardY.append(boardX)\n ctn = check(boardY)\n if ctn < ans:\n ans = ctn\nprint(ans)\n ","repo_name":"jjmin9797/algorithm","sub_path":"20220711/1018.py","file_name":"1018.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16532768717","text":"import sys\n\nsys.stdin = open('input.txt')\n\nT = int(input())\n\nnum = {\n 'ZRO':0,\n 'ONE':1,\n 'TWO':2,\n 'THR':3,\n 'FOR':4,\n 'FIV':5,\n 'SIX':6,\n 'SVN':7,\n 'EGT':8,\n 'NIN':9,\n}\n\nfor _ in range(1, T+1):\n tc, n = input().split()\n data = list(input().split())\n tns = []\n for d in data:\n tns.append(num[d])\n tns.sort()\n result = []\n for t in tns:\n for key, value in num.items():\n if t == value:\n result.append(key)\n print(f'{tc}', *result)","repo_name":"yuueuni/algorithm","sub_path":"swea/SWEA_D3/1221/1221.py","file_name":"1221.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17813625761","text":"\nimport gurobipy\nimport os\nimport logging\n\n\ndef get_gurobi_env():\n try:\n # Try using an ISV license based on the supplied environment variables\n env = gurobipy.Env.OtherEnv(\n os.getenv(\"OLLA_GUROBI_LOGFILE\", \"\"),\n os.getenv(\"OLLA_GUROBI_ISV_NAME\"),\n os.getenv(\"OLLA_GUROBI_ISV_APP_NAME\"),\n int(os.getenv(\"OLLA_GUROBI_ISV_EXPIRATION\")),\n os.getenv(\"OLLA_GUROBI_ISV_CODE\"),\n )\n logging.info(\"Successfully created Gurobi env with ISV license\")\n except:\n logging.warning(\n \"Failed to create env based on ISV license. 
If you intended to use\",\n \" an ISV license, set the OLLA_GUROBI_ISV_NAME\",\n \" OLLA_GUROBI_ISV_APP_NAME, OLLA_GUROBI_ISV_EXPIRATION,\",\n \" and OLLA_GUROBI_ISV_CODE to their corresponding values.\"\n \" Falling back to default Gurobi environment initialization.\",\n )\n # Fall back to default env\n env = gurobipy.Env()\n\n return env\n","repo_name":"facebookresearch/MODel_opt","sub_path":"model_opt/gurobi_utils.py","file_name":"gurobi_utils.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"72"} +{"seq_id":"74822078951","text":"# Created by hiddencoder at 23.04.2019\nimport json\nimport string\nfrom nltk import word_tokenize, pos_tag\nfrom nltk.stem import WordNetLemmatizer\nfrom textblob import TextBlob\nimport pymorphy2\n\n\nclass Analyzer:\n \"\"\"\n Class which contains main app logic.\n \"\"\"\n\n def __init__(self, replicas, language=\"eng\"):\n if isinstance(replicas, list) or isinstance(replicas, str):\n self.__language = language\n self.__replicas = replicas\n else:\n raise ValueError(\"Wrong data type was given\")\n\n @staticmethod\n def tokenization(sentence):\n \"\"\"\n Tokenizer.\n\n Choose words from sentence\n :return:\n \"\"\"\n tokens = word_tokenize(sentence)\n tokens = [i.lower() for i in tokens if i not in string.punctuation]\n return tokens\n\n def lemmatize_word(self, word):\n if self.__language == \"eng\":\n return WordNetLemmatizer().lemmatize(word, pos=\"v\")\n else:\n morph = pymorphy2.MorphAnalyzer()\n p = morph.parse(word)[0]\n return p.normal_form\n\n def pos_tag_sentence(self, sentence):\n \"\"\"\n PoS-tagging.\n\n Method for Part-of-Speech tagging\n :return: tags for all words in sentence\n \"\"\"\n if self.__language == \"eng\":\n return pos_tag(self.tokenization(sentence))\n elif self.__language == \"rus\":\n morph = pymorphy2.MorphAnalyzer()\n return list(map(lambda x: tuple([x, morph.parse(x)[0].tag.POS]),\n self.tokenization(sentence)))\n\n @staticmethod\n def sentiment(sentences):\n sentiments = []\n for item in sentences:\n blob = TextBlob(item)\n sentiments.append((item, blob.sentiment))\n return sentiments\n\n @staticmethod\n def get_pos_description(tag):\n with open(\"PoS_description.json\", 'r', encoding=\"utf-8\")as file:\n data = json.load(file)\n tag_description = data[\"tags\"][tag]\n return tag_description\n\n @staticmethod\n def detect_language(sentence):\n \"\"\"\n Language detecting.\n\n Used to detect language of sentence\n :param sentence: string sentence\n :return: language\n \"\"\"\n result = TextBlob(sentence).detect_language()\n if result == \"ru\":\n result += \"s\"\n elif result == \"en\":\n result += \"g\"\n return result\n\n @property\n def language(self):\n return self.__language\n","repo_name":"andriymurovanyi/Sentiment-analisys","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22802935624","text":"from OOPLearning.PythonAdvancedOOPCourse.MathPainterApp.canvas import Canvas\r\nfrom OOPLearning.PythonAdvancedOOPCourse.MathPainterApp.shapes import Square, Rectangle\r\n\r\ncolour_library = {\"white\" : (255,255,255), \"black\": (0,0,0), \"blue\":(0,0,255), \"red\" : (255, 0, 0), \"green\": (0,250,0) }\r\n\r\ncanvas_width = int(input(\"How wide would you like the canvas? \\n >> \"))\r\ncanvas_height = int(input(\"How high would you like the canvas? 
\\n >> \"))\r\ncanvas_colour = input(\"What colour would you like the canvas? Write 'black' or 'white': \\n >> \")\r\ncanvas = Canvas(width=canvas_width, height=canvas_height, color=colour_library[canvas_colour])\r\nshape_choice = \"\"\r\n\r\nwhile True:\r\n # Ask what user wants to draw\r\n shape_choice = input(\r\n \"What would you like to draw? Type: 'square', 'rectangle' or any other input to quit. \\n >> \")\r\n # get rectangle data\r\n if shape_choice == 'rectangle':\r\n\r\n\r\n rectangle_x = int(input(\"Please choose the x position for the top left corner of the rectangle: \\n >> \"))\r\n rectangle_y = int(input(\"Please choose the y position for the top left corner of the rectangle: \\n >> \"))\r\n rectangle_height = int(input(\"Please choose the height of the rectangle: \\n >> \"))\r\n rectangle_width = int(input(\"Please choose the width of the rectangle: \\n >> \"))\r\n rectangle_color_red = 25.5*int(input(\"Please choose the intensity of red for the square (between 1-10) \\n >> \"))\r\n rectangle_color_green = 25.5*int(input(\"Please choose the intensity of green for the square (between 1-10) \\n >> \"))\r\n rectangle_color_blue = 25.5*int(input(\"Please choose the intensity of blue for the square (between 1-10) \\n >> \"))\r\n #shape_choice = input(\"What would you like to draw? Type: 'square', 'rectangle' or any other input to quit. \\n >> \")\r\n\r\n rectangle1 = Rectangle(x=rectangle_x, y=rectangle_y, height=rectangle_height, width=rectangle_width, color=(rectangle_color_red,rectangle_color_green, rectangle_color_blue))\r\n rectangle1.draw(canvas)\r\n\r\n\r\n if shape_choice == 'square':\r\n square_x = int(input(\"Please choose the x position for the top left corner of the square: \\n >> \"))\r\n square_y = int(input(\"Please choose the y position for the top left corner of the square: \\n >> \"))\r\n square_side = int(input(\"Please choose the height of the sides of the square: \\n >> \"))\r\n square_color_red = 25.5*int(input(\"Please choose the intensity of red for the square (between 1-10) \\n >> \"))\r\n square_color_green = 25.5*int(input(\"Please choose the intensity of green for the square (between 1-10) \\n >> \"))\r\n square_color_blue = 25.5*int(input(\"Please choose the intensity of blue for the square (between 1-10) \\n >> \"))\r\n #shape_choice = input(\"What would you like to draw? Type: 'square', 'rectangle' or any other input to quit. 
\\n >> \")\r\n\r\n square1 = Square(x=square_x, y=square_y, side=square_side, color=(square_color_red,square_color_green, square_color_blue))\r\n square1.draw(canvas)\r\n\r\n if shape_choice == 'quit':\r\n break\r\n\r\ncanvas.Make('files/canvas.png')","repo_name":"Liamcodeshard/PythonStuff","sub_path":"PythonAdvancedOOPCourse/MathPainterApp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38825371867","text":"from django.urls import path, re_path\nfrom .views import *\n\nurlpatterns = [\n path('', WomenHome.as_view(), name='home'), #главная страница, as_view() для вызова функции\n path('about/', about, name='about'),\n path('addpage/', AddPage.as_view(), name='add_page'),\n path('contact/', contact, name='contact'),\n path('login/', login, name='login'),\n path('post//', ShowPost.as_view(), name='post'),\n path('category//', WomenCategory.as_view(), name='category'),\n]","repo_name":"DoriG1/site_biography_women","sub_path":"women/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5220266923","text":"import argparse, os\nfrom pyxrd.__version import __version__\nfrom .appdirs import user_data_dir, user_log_dir\n\n### General Information ###\nVERSION = __version__\n\nDEBUG = False\nFINALIZERS = [] #A list of callables that are called before the main function is left\nBGSHIFT = True\n\nLOG_FILENAME = os.path.join(user_log_dir('PyXRD'), 'errors.log')\n\n### The URL where PyXRD looks for updates & the online manual ###\nUPDATE_URL = 'http://users.ugent.be/~madumon/pyxrd/'\nMANUAL_URL = UPDATE_URL\n\n### Factor to multiply the CSDS average with to obtain the maximum CSDS ###\nLOG_NORMAL_MAX_CSDS_FACTOR = 2.5\n\n### What type of residual error we use: ###\n# \"Rp\" = 'standard' pattern Rp factor\n# \"Rpw\" = 'weighted' pattern Rp factor\n# \"Rpder\" = Rp factor of first-derivatives\nRESIDUAL_METHOD = \"Rp\"\n\n### Default wavelength if no Goniometer is available ###\nDEFAULT_LAMBDA = 0.154056\n\n### GUI Mode (for HPC turn to False) ###\nGUI_MODE = True\n\n\n### Size of parameter space record in bytes ###\nPAR_SPACE_MEMORY_LIMIT = 25 * (1024 * 1024)\n\n### Default Styles & Colors ###\nDEFAULT_LAYOUT = \"VIEWER\" # one of the keys in DEFAULT_LAYOUTS!\nDEFAULT_LAYOUTS = {\n \"FULL\": \"Full\",\n \"VIEWER\": \"View-mode\"\n}\n\nAXES_MANUAL_XMIN = 0.0\nAXES_MANUAL_XMAX = 70.0\nAXES_XSTRETCH = False\nAXES_DSPACING = False\nAXES_DEFAULT_WAVELENGTH = 0.154056\n\nAXES_XLIMIT = 0\nAXES_XLIMITS = {\n 0: \"Automatic\",\n 1: \"Manual\"\n}\n\nAXES_MANUAL_YMIN = 0.0\nAXES_MANUAL_YMAX = 0.0\nAXES_YVISIBLE = False\n\nAXES_YNORMALIZE = 0\nAXES_YNORMALIZERS = {\n 0: \"Multi normalised\",\n 1: \"Single normalised\",\n 2: \"Unchanged raw counts\",\n}\n\nAXES_YLIMIT = 0\nAXES_YLIMITS = {\n 0: \"Automatic\",\n 1: \"Manual\"\n}\n\nEXPERIMENTAL_COLOR = \"#000000\"\nCALCULATED_COLOR = \"#FF0000\"\n\nEXPERIMENTAL_LINEWIDTH = 1.0\nCALCULATED_LINEWIDTH = 2.0\n\nCALCULATED_LINESTYLE = \"-\"\nEXPERIMENTAL_LINESTYLE = \"-\"\n\nCALCULATED_MARKER = \"\"\nEXPERIMENTAL_MARKER = \"\"\n\nPATTERN_LINE_STYLES = {\n \"\": \"Nothing\",\n \"-\": \"Solid\",\n \"--\": \"Dashed\",\n \"-.\": \"Dash Dot\",\n \":\": \"Dotted\"\n}\n\nPATTERN_MARKERS = {\n \"\": \"No marker\",\n \".\": \"Point\",\n \",\": \"Pixel\",\n \"+\": \"Plus\",\n \"x\": \"Cross\",\n \"D\": \"Diamond\",\n \"o\": 
\"Circle\",\n \"v\": \"Triangle down\",\n \"^\": \"Triangle up\",\n \"<\": \"Triangle left\",\n \">\": \"Triangle right\",\n \"8\": \"Octagon\",\n \"s\": \"Square\",\n \"p\": \"Pentagon\",\n \"*\": \"Star\",\n \"h\": \"Hexagon\",\n}\n\nPATTERN_SHIFT_POSITIONS = {\n 0.42574: \"Quartz 0.42574 SiO2\",\n 0.3134: \"Silicon 0.31355 Si\",\n 0.2476: \"Zincite 0.24759 ZnO\",\n 0.2085: \"Corundum 0.2085 Al2O3\",\n 0.4183: \"Goethite 0.4183 FeO(OH)\",\n 0.48486: \"Gibbsite 0.48486 Al(OH)3\",\n}\nPATTERN_SHIFT_TYPE = \"Displacement\" # or \"Linear\"\nPATTERN_SMOOTH_TYPES = { 0: \"Moving Triangle\" }\nPATTERN_BG_TYPES = { 0: \"Linear\", 1: \"Pattern\" }\n\nDIVERGENCE_MODES = {\n \"AUTOMATIC\": \"Automatic divergence\",\n \"FIXED\": \"Fixed divergence\"\n}\nDEFAULT_DIVERGENCE_MODE = \"FIXED\"\nDEFAULT_SAMPLE_LENGTH = 1.25 # in cm\n\nPLOT_OFFSET = 0.75\nPATTERN_GROUP_BY = 1\nLABEL_POSITION = 0.35\n\nMARKER_VISIBLE = True\nMARKER_X_OFFSET = 0.0\nMARKER_Y_OFFSET = 0.05\nMARKER_POSITION = 0.0\n\nMARKER_INHERIT_COLOR = True\nMARKER_COLOR = \"#000000\"\nMARKER_INHERIT_ANGLE = True\nMARKER_ANGLE = 0.0\nMARKER_INHERIT_TOP_OFFSET = True\nMARKER_TOP_OFFSET = 0.0\nMARKER_INHERIT_BASE = True\nMARKER_BASE = 1\nMARKER_BASES = {\n 0: \"X-axis\",\n 1: \"Experimental profile\",\n 2: \"Calculated profile\",\n 3: \"Lowest of both\",\n 4: \"Highest of both\"\n}\nMARKER_INHERIT_TOP = True\nMARKER_TOP = 0\nMARKER_TOPS = {\n 0: \"Relative to base\",\n 1: \"Top of plot\"\n}\nMARKER_INHERIT_STYLE = True\nMARKER_STYLE = \"none\"\nMARKER_STYLES = {\n \"none\": \"None\", \"solid\": \"Solid\",\n \"dashed\": \"Dash\", \"dotted\": \"Dotted\",\n \"dashdot\": \"Dash-Dotted\", \"offset\": \"Display at Y-offset\"\n}\nMARKER_INHERIT_ALIGN = True\nMARKER_ALIGN = \"left\"\nMARKER_ALIGNS = {\n \"left\": \"Left align\",\n \"center\": \"Centered\",\n \"right\": \"Right align\"\n}\n\nEXCLUSION_FOREG = \"#999999\"\nEXCLUSION_LINES = \"#333333\"\n\n### Plot Information ###\nPLOT_TOP = 0.85\nMAX_PLOT_RIGHT = 0.95\nPLOT_BOTTOM = 0.0\nPLOT_LEFT = 0.05\nPLOT_HEIGHT = PLOT_TOP - PLOT_BOTTOM\n\nOUTPUT_PRESETS = [\n (\"Landscape Large print\", 8000, 4800, 300.0),\n (\"Landscape Medium print\", 6000, 3800, 300.0),\n (\"Landscape Small print\", 4000, 2800, 300.0),\n (\"Portrait Large print\", 4800, 8000, 300.0),\n (\"Portrait Medium print\", 3800, 6000, 300.0),\n (\"Portrait Small print\", 2800, 4000, 300.0),\n]\n\n### Default Directories & Files ###\nDATA_REG = None # set at run-time\nDATA_DIRS = [\n (\"DEFAULT_DATA\", \"./\", None),\n (\"USER_DATA\", user_data_dir('PyXRD'), None),\n (\"LOG_DIR\", user_log_dir('PyXRD'), None),\n (\"DEFAULT_PHASES\", \"default phases/\", \"USER_DATA\"),\n (\"DEFAULT_COMPONENTS\", \"default components/\", \"DEFAULT_DATA\"),\n (\"DEFAULT_GONIOS\", \"default goniometers/\", \"DEFAULT_DATA\"),\n (\"DEFAULT_WL_DISTR\", \"default wavelength distributions/\", \"DEFAULT_DATA\"),\n (\"APPLICATION_ICONS\", \"icons/\", \"DEFAULT_DATA\"),\n]\nDATA_FILES = [\n (\"COMPOSITION_CONV\", \"composition_conversion.csv\", \"DEFAULT_DATA\"),\n (\"ATOM_SCAT_FACTORS\", \"atomic scattering factors.atl\", \"DEFAULT_DATA\"),\n (\"MINERALS\", \"mineral_references.csv\", \"DEFAULT_DATA\"),\n]\n\n### Async calculation providers ###\nASYNC_SERER_PROVIDERS = [\n \"pyxrd.server.provider.Pyro4AsyncServerProvider\",\n \"pyxrd.generic.asynchronous.dummy_async_provider.DummyAsyncServerProvider\",\n]\nASYNC_SERVER_PRELOAD = True\n\n### Runtime Settings Retrieval ###\nSETTINGS_APPLIED = False\nARGS = None\n\ndef _parse_args():\n \"\"\" Parses command line 
arguments \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"filename\", nargs=\"?\", default=\"\",\n help=\"A PyXRD project filename\"\n )\n parser.add_argument(\n \"-s\", \"--script\", default=\"\",\n help=\"Can be used to pass a script containing a run() function\"\n )\n parser.add_argument(\n \"-d\", \"--debug\", dest='debug', action='store_const',\n const=True, default=False,\n help='Run in debug mode'\n )\n\n args = parser.parse_args()\n del parser # free some memory\n return args\n\n__apply_lock__ = False\ndef initialize(override_debug=DEBUG):\n \"\"\"Apply runtime settings, can and needs to be called only once\"\"\"\n global __apply_lock__, SETTINGS_APPLIED\n if not __apply_lock__ and not SETTINGS_APPLIED:\n __apply_lock__ = True\n\n # Get command line arguments\n global ARGS\n ARGS = _parse_args()\n\n # Set gui flag\n global GUI_MODE\n GUI_MODE = not bool(ARGS.script)\n\n # Set debug flag\n global DEBUG\n DEBUG = ARGS.debug or override_debug\n\n # Setup data registry:\n global DATA_REG, DATA_DIRS, DATA_FILES\n from pyxrd.generic.io.data_registry import DataRegistry\n DATA_REG = DataRegistry(dirs=DATA_DIRS, files=DATA_FILES)\n\n # If we are running in GUI mode, setup GUI stuff:\n if GUI_MODE:\n import matplotlib\n import gi\n gi.require_version('Gtk', '3.0')\n from gi.repository import Gtk, GdkPixbuf\n\n # Setup matplotlib fonts:\n font = {\n 'weight' : 'heavy', 'size': 14,\n\t\t'sans-serif' : 'Helvetica, Arial, sans-serif',\n 'family' : 'sans-serif',\n }\n matplotlib.rc('font', **font)\n mathtext = {\n 'default': 'regular',\n 'fontset': 'stixsans',\n }\n matplotlib.rc('mathtext', **mathtext)\n # matplotlib.rc('text', **{'usetex':True})\n # Load our own icons:\n iconfactory = Gtk.IconFactory()\n icons_path = DATA_REG.get_directory_path(\"APPLICATION_ICONS\")\n for root, dirnames, filenames in os.walk(icons_path):\n for filename in filenames:\n if filename.endswith(\".png\"):\n stock_id = filename[:-4] # remove extensions\n filename = \"%s/%s\" % (icons_path, filename) \n pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)\n iconset = Gtk.IconSet(pixbuf)\n iconfactory.add(stock_id, iconset)\n iconfactory.add_default()\n\n # Make sure default directories exist:\n for path in DATA_REG.get_all_directories():\n try:\n os.makedirs(path)\n except OSError:\n pass\n\n # Free some memory at this point:\n import gc\n gc.collect()\n\n # Log that we did all of this:\n import logging\n logger = logging.getLogger(__name__)\n logger.info(\"Runtime settings applied\")\n\n __apply_lock__ = False\n SETTINGS_APPLIED = True\n\n# ## end of settings\n","repo_name":"PyXRD/PyXRD","sub_path":"pyxrd/data/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8754,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"72"} +{"seq_id":"45124420946","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\n\ndef _create_colorbar(\n fig,\n im,\n ax,\n width=\"10%\",\n height=\"100%\",\n loc=\"center right\",\n bbox_to_anchor=(0.1, 0, 1, 1),\n borderpad=0,\n orientation=\"vertical\",\n ticks=None,\n label=None,\n binary=False,\n ticklabels=None,\n title=None,\n):\n\n axins = inset_axes(\n ax,\n width=width,\n height=height,\n loc=loc,\n bbox_to_anchor=bbox_to_anchor,\n bbox_transform=ax.transAxes,\n borderpad=borderpad,\n )\n\n cbar = fig.colorbar(\n im,\n cax=axins,\n ticks=ticks,\n orientation=orientation,\n label=label,\n )\n\n if ticklabels is not 
None:\n cbar.set_ticklabels(ticklabels=ticklabels)\n\n if title is not None:\n cbar.ax.set_title(title)\n\n if binary:\n cbar.set_ticks(ticks=[1 / 4, 3 / 4])\n cbar.set_ticklabels(ticklabels=[\"excl\", \"incl\"])\n\n return cbar\n\n\ndef _remove_ax_lines(\n ax,\n dirs=[\"left\", \"right\", \"top\", \"bottom\"],\n):\n for d in dirs:\n ax.spines[d].set_visible(False)\n\n\ndef _remove_ax_ticks(\n ax,\n):\n ax.set(xticks=[], yticks=[])\n\n\ndef _set_ax_text(ax, xlabel=None, ylabel=None, title=None):\n\n ax.set_xlabel(xlabel) if xlabel is not None else None\n ax.set_ylabel(ylabel) if ylabel is not None else None\n ax.set_title(title) if title is not None else None\n\n\ndef _add_label_to_ax(\n ax,\n label,\n px=0.05,\n py=0.94,\n ha=\"left\",\n va=\"top\",\n fontsize=9,\n bbox=dict(boxstyle=\"round\", ec=\"black\", fc=\"white\"),\n):\n\n ax.text(\n px,\n py,\n s=label,\n ha=ha,\n va=va,\n fontsize=fontsize,\n transform=ax.transAxes,\n bbox=bbox,\n )\n\n\ndef _set_ax_line_color(ax, color, dirs=[\"left\", \"right\", \"top\", \"bottom\"]):\n for d in dirs:\n ax.spines[d].set_color(color)\n\n\ndef _create_subplot(ax, mat, lims, cmap, xlabel, ylabel, title):\n\n im = ax.imshow(\n mat,\n vmin=lims[0],\n vmax=lims[1],\n cmap=cmap,\n interpolation=None,\n )\n ax.set(\n xlabel=xlabel,\n ylabel=ylabel,\n title=title,\n )\n return im\n\n\ndef _set_ax_shape(ax, shape, white=True):\n\n return ax.imshow(\n np.ones(shape), cmap=plt.cm.gray if white else plt.cm.binary, vmin=0, vmax=1\n )\n\n\ndef _plotdpi(dpi=124):\n return int(dpi)\n\n\ndef _create_bias_image(mean_image, ref, mask=None) -> np.array:\n \"\"\"Helper function to create bias maps\"\"\"\n\n error_img = np.zeros(ref.shape)\n\n if mask is None:\n mask = ref != 0\n error_img[mask] = 100 * (mean_image[mask] - ref[mask]) / ref[mask]\n\n return error_img\n\n\ndef _create_percentage_diff_image(image, ref, mask):\n\n res = np.zeros(mask.shape)\n res[mask] = image[mask] * (100 / ref[mask])\n\n return res\n\n\ndef _draw_region_box(c, sx, sy):\n\n cx, cy = (c[0], c[1])\n\n trace_x = np.array(\n [\n cx - 0.5 * sx,\n cx - 0.5 * sx,\n cx + 0.5 * sx,\n cx + 0.5 * sx,\n cx - 0.5 * sx,\n ]\n )\n\n trace_y = np.array(\n [\n cy - 0.5 * sy,\n cy + 0.5 * sy,\n cy + 0.5 * sy,\n cy - 0.5 * sy,\n cy - 0.5 * sy,\n ]\n )\n\n return (\n trace_x.astype(np.int),\n trace_y.astype(np.int),\n )\n\n\ndef _select_matrix_from_box(image, trace_x, trace_y, stack=False):\n\n (xmin, xmax, ymin, ymax) = (\n trace_x.min(),\n trace_x.max(),\n trace_y.min(),\n trace_y.max(),\n )\n\n if stack:\n return image[:, ymin:ymax, xmin:xmax]\n return image[ymin:ymax, xmin:xmax]\n\n\ndef _plotsize(\n width=8.25,\n height=11.75,\n indent_width=0,\n indent_height=0,\n):\n # defaults are a4 paper dimensions (inches)\n return (\n width - indent_width,\n height - indent_height,\n )\n\n\ndef _tex_str(string, bold=False):\n # note: replace \\ with \\\\ in argument string\n if bold:\n return \"\\\\textnormal{\" + \"\\\\textbf{\" + string + \"}}\"\n return \"\\\\textnormal{\" + string + \"}\"\n\n\ndef _cmap_binary(\n c0=\"black\",\n c1=\"white\",\n):\n \"function to set binary color map (e.g. 
for image masks)\"\n return matplotlib.colors.ListedColormap([c0, c1])\n\n\ndef _padding(\n array,\n xx,\n yy,\n constant_values=0,\n):\n\n h, w = array.shape[0], array.shape[1]\n a = (xx - h) // 2\n aa = xx - a - h\n b = (yy - w) // 2\n bb = yy - b - w\n\n return np.pad(\n array,\n pad_width=((a, aa), (b, bb)),\n mode=\"constant\",\n constant_values=constant_values,\n )\n","repo_name":"rvbCMTS/rvbrtg","sub_path":"utils/plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8691297759","text":"from itertools import chain\n\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom .base_model import BaseModel\nfrom . import networks\nimport os\nimport time\nfrom .projection import Projection\nfrom torchvision.transforms import Normalize\nfrom .model import Encoder, Decoder, SlotAttention, SlotAttentionRegular, SlotAttentionOwn, Discriminator, get_perceptual_net, raw2outputs, toggle_grad, d_logistic_loss, d_r1_loss, g_nonsaturating_loss\n\n\nclass uorfGanModel(BaseModel):\n\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n \"\"\"Add new model-specific options and rewrite default values for existing options.\n\n Parameters:\n parser -- the option parser\n is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n \"\"\"\n parser.add_argument('--num_slots', metavar='K', type=int, default=8, help='Number of supported slots')\n parser.add_argument('--z_dim', type=int, default=64, help='Dimension of individual z latent per slot')\n parser.add_argument('--slot_init', type=str, default=\"kmeans\", help='What kind of slot initialization do you want: PseudoWeightsOld, PseudoWeights, Context, kmeans, pooling, pixelattn, vectorattn, kmeansexp, hash, SAMLPMax, SA5CC, SAMLPSum, SAMLPMaxRes, SAMLPSumRes, SAPoolingMax, SAPoolingSum, HashMax, HashSum, SAHash, SAHashMax, SAHashMaxReLU, SAHashSum, regular, own, PoolingResMaxPermi') # hashparam, hashparamself,\n parser.add_argument('--attn_iter', type=int, default=3, help='Number of refine iteration in slot attention')\n parser.add_argument('--warmup_steps', type=int, default=1000, help='Warmup steps')\n parser.add_argument('--nss_scale', type=float, default=7, help='Scale of the scene, related to camera matrix')\n parser.add_argument('--render_size', type=int, default=64, help='Shape of patch to render each forward process. 
Must be Frustum_size/(2^N) where N=0,1,..., Smaller values cost longer time but require less GPU memory.')\n parser.add_argument('--supervision_size', type=int, default=64)\n parser.add_argument('--obj_scale', type=float, default=4.5, help='Scale for locality on foreground objects')\n parser.add_argument('--n_freq', type=int, default=5, help='how many increased freq?')\n parser.add_argument('--n_samp', type=int, default=64, help='num of samp per ray')\n parser.add_argument('--n_layer', type=int, default=3, help='num of layers bef/aft skip link in decoder')\n parser.add_argument('--weight_percept', type=float, default=0.006)\n parser.add_argument('--percept_in', type=int, default=20)\n parser.add_argument('--no_locality_epoch', type=int, default=60)\n parser.add_argument('--bottom', action='store_true', help='one more encoder layer on bottom')\n parser.add_argument('--input_size', type=int, default=128)\n parser.add_argument('--frustum_size', type=int, default=64)\n parser.add_argument('--frustum_size_fine', type=int, default=128)\n parser.add_argument('--attn_decay_steps', type=int, default=2e5)\n parser.add_argument('--coarse_epoch', type=int, default=120)\n parser.add_argument('--near_plane', type=float, default=6)\n parser.add_argument('--far_plane', type=float, default=20)\n parser.add_argument('--d_lr', type=float, default=0.001)\n parser.add_argument('--weight_gan', type=float, default=0.01)\n parser.add_argument('--gan_in', type=int, default=5)\n parser.add_argument('--ndf', type=int, default=64)\n parser.add_argument('--weight_r1', type=float, default=10)\n parser.add_argument('--gan_train_epoch', type=int, default=20)\n parser.add_argument('--fixed_locality', action='store_true', help='enforce locality in world space instead of transformed view space')\n\n parser.set_defaults(batch_size=1, lr=3e-4, niter_decay=0,\n dataset_mode='multiscenes', niter=240, custom_lr=True, lr_policy='warmup')\n\n parser.set_defaults(exp_id='run-{}'.format(time.strftime('%Y-%m-%d-%H-%M-%S')))\n\n return parser\n\n def __init__(self, opt):\n \"\"\"Initialize this model class.\n\n Parameters:\n opt -- training/test options\n\n A few things can be done here.\n - (required) call the initialization function of BaseModel\n - define loss function, visualization images, model names, and optimizers\n \"\"\"\n BaseModel.__init__(self, opt) # call the initialization method of BaseModel\n self.batch_size = opt.batch_size\n self.loss_names = ['recon', 'perc', 'gan', 'd_real', 'd_fake']\n self.loss_recon, self.loss_perc, self.loss_gan, self.loss_d_real, self.loss_d_fake = 0,0,0,0,0\n n = opt.n_img_each_scene\n self.visual_names = ['x{}'.format(i) for i in range(n)] + \\\n ['x_rec{}'.format(i) for i in range(n)] + \\\n ['slot{}_view{}'.format(k, i) for k in range(opt.num_slots) for i in range(n)] + \\\n ['unmasked_slot{}_view{}'.format(k, i) for k in range(opt.num_slots) for i in range(n)] + \\\n ['slot{}_attn'.format(k) for k in range(opt.num_slots)]\n self.model_names = ['Encoder', 'SlotAttention', 'Decoder']\n self.perceptual_net = get_perceptual_net().cuda()\n self.vgg_norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n render_size = (opt.render_size, opt.render_size)\n frustum_size = [self.opt.frustum_size, self.opt.frustum_size, self.opt.n_samp]\n self.projection = Projection(device=self.device, nss_scale=opt.nss_scale,\n frustum_size=frustum_size, near=opt.near_plane, far=opt.far_plane, render_size=render_size)\n frustum_size_fine = [self.opt.frustum_size_fine, self.opt.frustum_size_fine, 
self.opt.n_samp]\n self.projection_fine = Projection(device=self.device, nss_scale=opt.nss_scale,\n frustum_size=frustum_size_fine, near=opt.near_plane, far=opt.far_plane, render_size=render_size)\n z_dim = opt.z_dim\n self.num_slots = opt.num_slots\n self.netEncoder = networks.init_net(Encoder(3, z_dim=z_dim, bottom=opt.bottom),\n gpu_ids=self.gpu_ids, init_type='normal')\n if opt.slot_init == 'regular':\n self.netSlotAttention = networks.init_net(\n SlotAttentionRegular(num_slots=opt.num_slots, in_dim=z_dim, slot_dim=z_dim, iters=opt.attn_iter),\n gpu_ids=self.gpu_ids, init_type='normal')\n elif opt.slot_init == 'own':\n self.netSlotAttention = networks.init_net(\n SlotAttentionOwn(num_slots=opt.num_slots, in_dim=z_dim, slot_dim=z_dim, iters=opt.attn_iter),\n gpu_ids=self.gpu_ids, init_type='normal')\n else:\n self.netSlotAttention = networks.init_net(\n SlotAttention(opt=opt, num_slots=opt.num_slots, in_dim=z_dim, slot_dim=z_dim, iters=opt.attn_iter), gpu_ids=self.gpu_ids, init_type='normal')\n self.netDecoder = networks.init_net(Decoder(n_freq=opt.n_freq, input_dim=6*opt.n_freq+3+z_dim, z_dim=opt.z_dim, n_layers=opt.n_layer,\n locality_ratio=opt.obj_scale/opt.nss_scale, fixed_locality=opt.fixed_locality), gpu_ids=self.gpu_ids, init_type='xavier')\n self.netDisc = networks.init_net(Discriminator(size=opt.supervision_size, ndf=opt.ndf), gpu_ids=self.gpu_ids, init_type=None)\n if self.isTrain: # only defined during training time\n self.optimizer = optim.Adam(chain(\n self.netEncoder.parameters(), self.netSlotAttention.parameters(), self.netDecoder.parameters()\n ), lr=opt.lr)\n self.optimizers = [self.optimizer]\n self.d_optimizer = optim.Adam(self.netDisc.parameters(), lr=opt.d_lr, betas=(0., 0.9))\n\n self.L2_loss = nn.MSELoss()\n\n def setup(self, opt):\n \"\"\"Load and print networks; create schedulers\n Parameters:\n opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n if self.isTrain:\n self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n self.d_scheduler = networks.get_scheduler(self.d_optimizer, opt)\n if not self.isTrain or opt.continue_train:\n load_suffix = 'iter_{}'.format(opt.load_iter) if opt.load_iter > 0 else opt.epoch\n self.load_networks(load_suffix)\n self.print_networks(opt.verbose)\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input: a dictionary that contains the data itself and its metadata information.\n \"\"\"\n self.x = input['img_data'].to(self.device).view(self.batch_size, 4, 3, 128, 128)\n #print(\"self.x\", self.x.shape)\n self.cam2world = input['cam2world'].to(self.device).view(self.batch_size, 4, 4, 4)\n #print(\"self.cam2world\", self.cam2world.shape)\n if not self.opt.fixed_locality:\n self.cam2world_azi = input['azi_rot'].to(self.device).view(self.batch_size, 4, 3, 3)\n #print(\"self.cam2world_azi\", self.cam2world_azi.shape)\n\n def forward(self, epoch=0):\n \"\"\"Run forward pass. 
This will be called by both functions and .\"\"\"\n self.weight_percept = self.opt.weight_percept if epoch >= self.opt.percept_in else 0\n self.weight_gan = self.opt.weight_gan if epoch >= self.opt.gan_train_epoch + self.opt.gan_in else 0\n self.loss_recon = 0\n self.loss_perc = 0\n dev = self.x[:, 0:1].device\n #print(\"device\", dev)\n #nss2cam0 = torch.stack([self.cam2world[b, 0:1].inverse() if self.opt.fixed_locality else self.cam2world_azi[b, 0:1].inverse() for b in range(self.batch_size)], dim=0)\n nss2cam0 = self.cam2world[:, 0:1].inverse() if self.opt.fixed_locality else self.cam2world_azi[:, 0:1].inverse()\n #print(\"nss2cam0\", nss2cam0.shape)\n # Encoding images\n feature_map = self.netEncoder(torch.stack([F.interpolate(first_scene, size=self.opt.input_size, mode='bilinear', align_corners=False)[None, ...] for first_scene in self.x[:, 0:1]], dim=0).squeeze(1).squeeze(1)) # BxCxHxW\n #print(\"feature_map\", feature_map.shape)\n feat = feature_map.flatten(start_dim=2).permute([0, 2, 1]) # BxNxC\n #print(\"feat\", feat.shape)\n # Slot Attention\n z_slots, attn = self.netSlotAttention(feat) # BxKxC, BxKxN\n #print(\"z_slots\", z_slots.shape)\n #print(\"attn\", attn.shape)\n # z_slots, attn = z_slots.squeeze(0), attn.squeeze(0) # KxC, KxN\n K = self.num_slots # attn.shape[0]\n B = self.batch_size\n cam2world = self.cam2world\n N = cam2world.shape[1]\n frus_nss_coor_list = []\n z_vals_list = []\n ray_dir_list = []\n interpolate_x_list = []\n for b in range(self.batch_size):\n b_cam2world = cam2world[b, ...]\n x_interpol = self.x[b, ...]\n if self.opt.stage == 'coarse':\n frus_nss_coor, z_vals, ray_dir = self.projection.construct_sampling_coor(b_cam2world)\n # (NxDxHxW)x3, (NxHxW)xD, (NxHxW)x3\n x_interpol = F.interpolate(x_interpol, size=self.opt.supervision_size, mode='bilinear', align_corners=False)\n frus_nss_coor_list.append(frus_nss_coor)\n z_vals_list.append(z_vals)\n ray_dir_list.append((ray_dir))\n interpolate_x_list.append(x_interpol)\n # self.z_vals, self.ray_dir = z_vals, ray_dir\n else:\n W, H, D = self.opt.frustum_size_fine, self.opt.frustum_size_fine, self.opt.n_samp\n start_range = self.opt.frustum_size_fine - self.opt.render_size\n rs = self.opt.render_size\n frus_nss_coor, z_vals, ray_dir = self.projection_fine.construct_sampling_coor(b_cam2world)\n # (NxDxHxW)x3, (NxHxW)xD, (NxHxW)x3\n frus_nss_coor, z_vals, ray_dir = frus_nss_coor.view([N, D, H, W, 3]), z_vals.view([N, H, W, D]), ray_dir.view([N, H, W, 3])\n H_idx = torch.randint(low=0, high=start_range, size=(1,), device=dev)\n W_idx = torch.randint(low=0, high=start_range, size=(1,), device=dev)\n frus_nss_coor_, z_vals_, ray_dir_ = frus_nss_coor[..., H_idx:H_idx + rs, W_idx:W_idx + rs, :], z_vals[..., H_idx:H_idx + rs, W_idx:W_idx + rs, :], ray_dir[..., H_idx:H_idx + rs, W_idx:W_idx + rs, :]\n frus_nss_coor, z_vals, ray_dir = frus_nss_coor_.flatten(0, 3), z_vals_.flatten(0, 2), ray_dir_.flatten(0, 2)\n x_interpol = x_interpol[:, :, H_idx:H_idx + rs, W_idx:W_idx + rs]\n frus_nss_coor_list.append(frus_nss_coor)\n z_vals_list.append(z_vals)\n ray_dir_list.append((ray_dir))\n interpolate_x_list.append(x_interpol)\n # self.z_vals, self.ray_dir = z_vals, ray_dir\n self.z_vals, self.ray_dir = torch.stack(z_vals_list), torch.stack(ray_dir_list)\n z_vals, ray_dir = self.z_vals, self.ray_dir\n x = torch.stack(interpolate_x_list)\n #print(\"zvals\", z_vals.shape)\n #print(\"ray_dir\", ray_dir.shape)\n sampling_coor_fg = torch.stack(frus_nss_coor_list)[:, None, ...].expand(-1, K - 1, -1, -1) # Bx(K-1)xPx3\n # 
sampling_coor_fg = frus_nss_coor[None, ...].expand(K - 1, -1, -1) # (K-1)xPx3\n sampling_coor_bg = torch.stack(frus_nss_coor_list) # BxPx3\n #print(\"sampling_coor_fg\", sampling_coor_fg.shape)\n #print(\"sampling_coor_bg\", sampling_coor_bg.shape)\n\n W, H, D = self.opt.supervision_size, self.opt.supervision_size, self.opt.n_samp\n raws, masked_raws, unmasked_raws, masks = self.netDecoder(sampling_coor_bg, sampling_coor_fg, z_slots, nss2cam0) # Bx(NxDxHxW)x4, BxKx(NxDxHxW)x4, BxKx(NxDxHxW)x4, BxKx(NxDxHxW)x1\n raws = raws.view([B, N, D, H, W, 4]).permute([0, 1, 3, 4, 2, 5]).flatten(start_dim=1, end_dim=3) # Bx(NxHxW)xDx4\n masked_raws = masked_raws.view([B, K, N, D, H, W, 4])\n unmasked_raws = unmasked_raws.view([B, K, N, D, H, W, 4])\n rgb_map, _, _ = raw2outputs(raws, z_vals, ray_dir)\n # (NxHxW)x3, (NxHxW)\n rendered = rgb_map.view(B, N, H, W, 3).permute([0, 1, 4, 2, 3]) # BxNx3xHxW\n #print(\"rendered\", rendered.shape)\n x_recon = (rendered * 2 - 1)#.squeeze()\n #print(\"x_recon\", x_recon.shape)\n d_fake = torch.stack([self.netDisc(x_recon_b) for x_recon_b in x_recon[:]], dim=0)\n self.loss_gan = self.weight_gan * g_nonsaturating_loss(d_fake)\n\n self.loss_recon = self.L2_loss(x_recon, x)\n x_norm, rendered_norm = self.vgg_norm((x + 1) / 2), self.vgg_norm(rendered)\n rendered_feat = torch.stack([self.perceptual_net(rendered_norm_b) for rendered_norm_b in rendered_norm[:]], dim=0)\n x_feat = torch.stack([self.perceptual_net(x_norm_b)for x_norm_b in x_norm[:]], dim=0)\n self.loss_perc = self.weight_percept * self.L2_loss(rendered_feat, x_feat)\n\n with torch.no_grad():\n # do this only for the first element in batch\n attn = attn[0].detach().cpu() # KxN\n H_, W_ = feature_map.shape[2], feature_map.shape[3]\n attn = attn.view(self.opt.num_slots, 1, H_, W_)\n if H_ != H:\n attn = F.interpolate(attn, size=[H, W], mode='bilinear')\n for i in range(self.opt.n_img_each_scene):\n setattr(self, 'x_rec{}'.format(i), x_recon[0][i])\n setattr(self, 'x{}'.format(i), x[0][i])\n setattr(self, 'masked_raws', masked_raws[0].detach())\n setattr(self, 'unmasked_raws', unmasked_raws[0].detach())\n setattr(self, 'attn', attn)\n\n def compute_visuals(self):\n with torch.no_grad():\n _, N, D, H, W, _ = self.masked_raws.shape\n masked_raws = self.masked_raws # KxNxDxHxWx4\n unmasked_raws = self.unmasked_raws # KxNxDxHxWx4\n for k in range(self.num_slots):\n raws = masked_raws[k] # NxDxHxWx4\n z_vals, ray_dir = self.z_vals[0], self.ray_dir[0]\n raws = raws.permute([0, 2, 3, 1, 4]).flatten(start_dim=0, end_dim=2) # (NxHxW)xDx4\n rgb_map, depth_map, _ = raw2outputs(raws[None, ...], z_vals[None, ...], ray_dir[None, ...])\n rgb_map, depth_map = rgb_map.squeeze(0), depth_map.squeeze(0)\n rendered = rgb_map.view(N, H, W, 3).permute([0, 3, 1, 2]) # Nx3xHxW\n x_recon = rendered * 2 - 1\n for i in range(self.opt.n_img_each_scene):\n setattr(self, 'slot{}_view{}'.format(k, i), x_recon[i])\n\n raws = unmasked_raws[k] # (NxDxHxW)x4\n raws = raws.permute([0, 2, 3, 1, 4]).flatten(start_dim=0, end_dim=2) # (NxHxW)xDx4\n rgb_map, depth_map, _ = raw2outputs(raws[None, ...], z_vals[None, ...], ray_dir[None, ...])\n rgb_map, depth_map = rgb_map.squeeze(0), depth_map.squeeze(0)\n rendered = rgb_map.view(N, H, W, 3).permute([0, 3, 1, 2]) # Nx3xHxW\n x_recon = rendered * 2 - 1\n for i in range(self.opt.n_img_each_scene):\n setattr(self, 'unmasked_slot{}_view{}'.format(k, i), x_recon[i])\n\n setattr(self, 'slot{}_attn'.format(k), self.attn[k] * 2 - 1)\n\n def backward(self):\n \"\"\"Calculate losses, gradients, and update network 
weights; called in every training iteration\"\"\"\n loss = self.loss_recon + self.loss_perc + self.loss_gan\n loss.backward()\n self.loss_perc = self.loss_perc / self.weight_percept if self.weight_percept > 0 else self.loss_perc\n self.loss_gan = self.loss_gan / self.weight_gan if self.weight_gan > 0 else self.loss_gan\n\n def forward_disc(self, it=0):\n dev = self.x[0:1].device\n nss2cam0 = self.cam2world[:, 0:1].inverse() if self.opt.fixed_locality else self.cam2world_azi[:, 0:1].inverse()\n # Encoding images\n # print(self.x.shape)\n feature_map = self.netEncoder(torch.stack([F.interpolate(first_scene, size=self.opt.input_size, mode='bilinear', align_corners=False)[None, ...] for first_scene in self.x[:, 0:1]], dim=0).squeeze(1).squeeze(1)) # BxCxHxW\n # print(\"feature_map\", feature_map.shape)\n feat = feature_map.flatten(start_dim=2).permute([0, 2, 1]) # BxNxC\n\n # Slot Attention\n z_slots, attn = self.netSlotAttention(feat) # BxKxC, BxKxN\n # print(\"z_slots\", z_slots.shape)\n # print(\"attn\", attn.shape)\n K = self.num_slots # attn.shape[0]\n B = self.batch_size\n cam2world = self.cam2world\n N = cam2world.shape[1]\n frus_nss_coor_list = []\n z_vals_list = []\n ray_dir_list = []\n interpolate_x_list = []\n for b in range(self.batch_size):\n b_cam2world = cam2world[b, ...]\n x_interpol = self.x[b, ...]\n if self.opt.stage == 'coarse':\n frus_nss_coor, z_vals, ray_dir = self.projection.construct_sampling_coor(b_cam2world) # (NxDxHxW)x3, (NxHxW)xD, (NxHxW)x3\n x_interpol = F.interpolate(x_interpol, size=self.opt.supervision_size, mode='bilinear', align_corners=False)\n frus_nss_coor_list.append(frus_nss_coor)\n z_vals_list.append(z_vals)\n ray_dir_list.append((ray_dir))\n interpolate_x_list.append(x_interpol)\n # self.z_vals, self.ray_dir = z_vals, ray_dir\n else:\n W, H, D = self.opt.frustum_size_fine, self.opt.frustum_size_fine, self.opt.n_samp\n start_range = self.opt.frustum_size_fine - self.opt.render_size\n rs = self.opt.render_size\n frus_nss_coor, z_vals, ray_dir = self.projection_fine.construct_sampling_coor(b_cam2world)\n # (NxDxHxW)x3, (NxHxW)xD, (NxHxW)x3\n frus_nss_coor, z_vals, ray_dir = frus_nss_coor.view([N, D, H, W, 3]), z_vals.view([N, H, W, D]), ray_dir.view([N, H, W, 3])\n H_idx = torch.randint(low=0, high=start_range, size=(1,), device=dev)\n W_idx = torch.randint(low=0, high=start_range, size=(1,), device=dev)\n frus_nss_coor_, z_vals_, ray_dir_ = frus_nss_coor[..., H_idx:H_idx + rs, W_idx:W_idx + rs, :], z_vals[..., H_idx:H_idx + rs, W_idx:W_idx + rs, :], ray_dir[..., H_idx:H_idx + rs, W_idx:W_idx + rs, :]\n frus_nss_coor, z_vals, ray_dir = frus_nss_coor_.flatten(0, 3), z_vals_.flatten(0, 2), ray_dir_.flatten(0, 2)\n x_interpol = x_interpol[:, :, H_idx:H_idx + rs, W_idx:W_idx + rs]\n frus_nss_coor_list.append(frus_nss_coor)\n z_vals_list.append(z_vals)\n ray_dir_list.append((ray_dir))\n interpolate_x_list.append(x_interpol)\n # print(\"Maus\")\n # self.z_vals, self.ray_dir = z_vals, ray_dir\n self.z_vals, self.ray_dir = torch.stack(z_vals_list), torch.stack(ray_dir_list)\n z_vals, ray_dir = self.z_vals, self.ray_dir\n x = torch.stack(interpolate_x_list)\n # print(\"zvals\", z_vals.shape)\n # print(\"ray_dir\", ray_dir.shape)\n sampling_coor_fg = torch.stack(frus_nss_coor_list)[:, None, ...].expand(-1, K - 1, -1, -1) # Bx(K-1)xPx3\n # sampling_coor_fg = frus_nss_coor[None, ...].expand(K - 1, -1, -1) # (K-1)xPx3\n sampling_coor_bg = torch.stack(frus_nss_coor_list) # BxPx3\n # print(\"sampling_coor_fg\", sampling_coor_fg.shape)\n # print(\"sampling_coor_bg\", 
sampling_coor_bg.shape)\n\n W, H, D = self.opt.supervision_size, self.opt.supervision_size, self.opt.n_samp\n raws, masked_raws, unmasked_raws, masks = self.netDecoder(sampling_coor_bg, sampling_coor_fg, z_slots, nss2cam0) # Bx(NxDxHxW)x4, BxKx(NxDxHxW)x4, BxKx(NxDxHxW)x4, BxKx(NxDxHxW)x1\n raws = raws.view([B, N, D, H, W, 4]).permute([0, 1, 3, 4, 2, 5]).flatten(start_dim=1, end_dim=3) # Bx(NxHxW)xDx4\n\n rgb_map, _, _ = raw2outputs(raws, z_vals, ray_dir) # Bx(NxHxW)x3, Bx(NxHxW)\n rendered = rgb_map.view(B, N, H, W, 3).permute([0, 1, 4, 2, 3]) # BxNx3xHxW\n # print(\"rendered\", rendered.shape)\n x_recon = (rendered * 2 - 1) # .squeeze()\n # print(\"x_recon\", x_recon.shape)\n\n toggle_grad(self.netDisc, True)\n\n fake_img = x_recon\n real_img = x\n #d_fake = torch.stack([self.netDisc(x_recon_b) for x_recon_b in x_recon[:]], dim=0)\n\n fake_pred = torch.stack([self.netDisc(fake_img_b) for fake_img_b in fake_img[:]], dim=0)\n real_pred = torch.stack([self.netDisc(real_img_b) for real_img_b in real_img[:]], dim=0)\n\n d_loss_real, d_loss_fake = d_logistic_loss(real_pred, fake_pred)\n d_loss = d_loss_real + d_loss_fake\n\n self.netDisc.zero_grad()\n d_loss.backward()\n self.d_optimizer.step()\n\n if it % 32 == 0:\n real_img.requires_grad = True\n real_pred = torch.stack([self.netDisc(real_img_b) for real_img_b in real_img[:]], dim=0)\n r1_loss = d_r1_loss(real_pred, real_img)\n self.netDisc.zero_grad()\n (self.opt.weight_r1 * r1_loss).backward()\n self.d_optimizer.step()\n\n toggle_grad(self.netDisc, False)\n\n self.loss_d_real = d_loss_real\n self.loss_d_fake = d_loss_fake\n # print(\"katze\")\n\n def optimize_parameters(self, ret_grad=False, epoch=0):\n \"\"\"Update network weights; it will be called in every training iteration.\"\"\"\n self.forward(epoch)\n for opm in self.optimizers:\n opm.zero_grad()\n self.backward()\n avg_grads = []\n layers = []\n if ret_grad:\n for n, p in chain(self.netEncoder.named_parameters(), self.netSlotAttention.named_parameters(), self.netDecoder.named_parameters()):\n if p.grad is not None and \"bias\" not in n:\n with torch.no_grad():\n layers.append(n)\n avg_grads.append(p.grad.abs().mean().cpu().item())\n for opm in self.optimizers:\n # print('optimizer', time.time())\n opm.step()\n return layers, avg_grads\n\n def save_networks(self, surfix):\n \"\"\"Save all the networks to the disk.\n\n Parameters:\n surfix (int or str) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n super().save_networks(surfix)\n for i, opm in enumerate(self.optimizers):\n save_filename = '{}_optimizer_{}.pth'.format(surfix, i)\n save_path = os.path.join(self.save_dir, save_filename)\n torch.save(opm.state_dict(), save_path)\n\n for i, sch in enumerate(self.schedulers):\n save_filename = '{}_lr_scheduler_{}.pth'.format(surfix, i)\n save_path = os.path.join(self.save_dir, save_filename)\n torch.save(sch.state_dict(), save_path)\n\n def load_networks(self, surfix):\n \"\"\"Load all the networks from the disk.\n\n Parameters:\n surfix (int or str) -- current epoch; used in he file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n super().load_networks(surfix)\n\n if self.isTrain:\n for i, opm in enumerate(self.optimizers):\n load_filename = '{}_optimizer_{}.pth'.format(surfix, i)\n load_path = os.path.join(self.save_dir, load_filename)\n print('loading the optimizer from %s' % load_path)\n state_dict = torch.load(load_path, map_location=str(self.device))\n opm.load_state_dict(state_dict)\n\n for i, sch in enumerate(self.schedulers):\n load_filename = 
'{}_lr_scheduler_{}.pth'.format(surfix, i)\n load_path = os.path.join(self.save_dir, load_filename)\n print('loading the lr scheduler from %s' % load_path)\n state_dict = torch.load(load_path, map_location=str(self.device))\n sch.load_state_dict(state_dict)\n\n def update_learning_rate(self):\n \"\"\"Update learning rates for all the networks; called at the end of every epoch\"\"\"\n for scheduler in self.schedulers:\n # print('scheduler', time.time())\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\nif __name__ == '__main__':\n pass","repo_name":"slot-initialization/linosic","sub_path":"uORF/models/uorf_gan_model.py","file_name":"uorf_gan_model.py","file_ext":"py","file_size_in_byte":26813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20012746579","text":"import json\n# from preprocessing import *\nfrom typing import List\nimport string\nimport os\nimport random\nimport re\n\nlabel_dict = {'O': 0,'PERIOD':1, 'COMMA':2, 'COLON':3, 'QMARK':4}\nPUNCTUATIONS = ['O','PERIOD', 'COMMA', 'QMARK', 'PERIOD', 'COLON','COMMA']\nPUNCTUATION_SYM = ['','.',',','?','!',':',';']\n# EOS_TOKENS = ['PERIOD','QMARK','EXCLAM']\nEOS_TOKENS = ['PERIOD','QMARK']\n\n\ndef word_map(line:str):\n line = line.split()\n return line[0] + PUNCTUATION_SYM[PUNCTUATIONS.index(line[1])]\ndef show(path):\n text_ls = []\n seq_length = 100\n with open(path) as f:\n ls = f.read().splitlines()\n print(len(ls))\n for i in range(0,len(ls),seq_length):\n temp_text = ' '.join(list(map(word_map,ls[i:i+seq_length])))\n text_ls.append(temp_text)\n # print(temp_text)\n random.shuffle(text_ls)\n with open('check_text.json','w') as f:\n json.dump(text_ls[:5000],f,ensure_ascii=False) \n# show('dataset/icomm_news_dataset/Cleansed_data/train.txt')\n# input()\ncategory_list = ['Chính trị','Đối ngoại','Giáo dục','Pháp luật','Quân sự','Văn hóa','Xã hội']\n\ndef random_infer(path):\n with open(path) as f:\n ls = json.load(f)\n with open('check.json','w') as f:\n json.dump(ls[:100],f,ensure_ascii=False)\n# random_infer('/home/huydang/project/post_asr_normalize/data/news/Xã hội/content.json')\n# print(string.punctuation)\n\ndef de_punc(text:str)->List:\n # Example: Chào, tôi là developer. 
-> ['Chào O', 'tôi O', ....]\n \n\n words = text.split()\n # print(words)\n result_ls = []\n for ind,word in enumerate(words):\n \n if word[-1] not in PUNCTUATION_SYM[1:]:\n result_ls.append(word + ' O')\n else:\n if (len(word[:-1] ) <= 0):\n # print(word)\n # print(text)\n # print(words[ind+1])\n # print(words[ind-1])\n continue\n punctuation_label = PUNCTUATIONS[PUNCTUATION_SYM.index(word[-1])]\n result_ls.append(word[:-1] + ' ' + punctuation_label)\n return result_ls\n# print(de_punc('Tại hội_nghị giao_ban trực_tuyến của UBND thành_phố Hà_Nội diễn ra hôm_nay ( 6/4 ) , Giám_đốc Sở Nội_vụ Vũ_Thu_Hà cho biết các đoàn kiểm_tra công_vụ đã kiểm_tra đột_xuất 34 lượt các cơ_quan , đơn_vị và kiểm_tra xác_minh 2 vụ_việc theo chỉ_đạo của thành_phố .'))\ndef preprocess(text:str):\n r = text\n try:\n text = text.strip()\n \n text = text.replace('_',' ')\n text = re.sub('\\s+',' ',text)\n for punc in PUNCTUATION_SYM[1:]:\n text = text.lstrip(punc)\n text = re.sub(f'\\s+\\{punc}',f'{punc}',text)\n text = re.sub(f'\\{punc}+',f'{punc}',text)\n if text[-1] not in string.punctuation:\n text += '.'\n except:\n print(r)\n print(text)\n return ''\n return text\ndef get_data():\n train_ratio = 0.7\n dev_ratio = 0.15\n test_ratio = 0.15\n train_origin_text_ls = []\n dev_origin_text_ls = []\n test_origin_text_ls = []\n for category in category_list:\n with open(f'data/news/{category}/content.json') as f:\n temp_ls = json.load(f)\n temp_ls = temp_ls[:int(len(temp_ls)/3)]\n random.shuffle(temp_ls)\n temp_ls = [i['message'] for i in temp_ls if 'message' in i]\n temp_ls = list(map(preprocess,temp_ls))\n temp_ls = [i for i in temp_ls if i != '']\n with open('show_sen.json','w') as f:\n \n json.dump(temp_ls[:100],f,ensure_ascii=False)\n \n split_index1 = int(len(temp_ls)*train_ratio)\n split_index2 = split_index1 + int(len(temp_ls)*dev_ratio)\n print(split_index1,split_index2,len(temp_ls))\n train_origin_text_ls.extend(temp_ls[:split_index1])\n dev_origin_text_ls.extend(temp_ls[split_index1:split_index2])\n test_origin_text_ls.extend(temp_ls[split_index2:])\n print('train doc:',len(train_origin_text_ls))\n print('test doc:',len(test_origin_text_ls))\n train_set = []\n test_set = []\n dev_set = []\n for doc in train_origin_text_ls:\n train_set.extend(de_punc(doc))\n for doc in dev_origin_text_ls:\n dev_set.extend(de_punc(doc))\n for doc in test_origin_text_ls:\n test_set.extend(de_punc(doc))\n \n with open('dataset/icomm_news_dataset/Cleansed_data/train.txt','w') as f:\n f.write('\\n'.join(train_set))\n with open('dataset/icomm_news_dataset/Cleansed_data/test.txt','w') as f:\n f.write('\\n'.join(test_set))\n with open('dataset/icomm_news_dataset/Cleansed_data/valid.txt','w') as f:\n f.write('\\n'.join(dev_set))\n\nget_data()\n# with open('/home/huydang/project/post_asr_normalize/vietnamese-punctuation-prediction/Vietnamese_newspapers/dataset/icomm_news_dataset/Cleansed_data/train.txt') as f:\n# a = f.read().splitlines()\n# print(len(a))\n# print(a[0])","repo_name":"huydang2106/vietnamese_punctuation","sub_path":"handle_news_data.py","file_name":"handle_news_data.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74851647273","text":"#coding:utf-8\n\n# all the imports\nimport sqlite3\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash\nfrom contextlib import closing\n\n# configuration\nDATABASE = '/tmp/flaskr.db'\n#永远不要在生产环境中打开调试模式\nDEBUG = True \nSECRET_KEY = 
'development key'\nUSERNAME = 'admin'\nPASSWORD = 'default'\n\n# create our little application\napp = Flask(__name__)\n# looks at the given object and picks up every variable whose name is all uppercase\napp.config.from_object(__name__) \n\n# connect to the database\ndef connect_db():\n    return sqlite3.connect(app.config['DATABASE'])\n\n# initialize the database from schema.sql\n# note: import init_db from flaskr and then call init_db()\ndef init_db():\n    with closing(connect_db()) as db:\n        with app.open_resource('schema.sql', mode='r')as f:\n            db.cursor().executescript(f.read())\n        db.commit() \n    \n# open the connection before each request and close it again afterwards\n@app.before_request\ndef before_request():\n    g.db = connect_db()\n\n@app.teardown_request\ndef teardown_request(exception):\n    db = getattr(g, 'db', None) # getattr returns an attribute (or method) of an object\n    if db is not None:\n        db.close()\n\n# this view passes the entries as dictionaries to the show_entries.html template\n@app.route('/') \ndef show_entries():\n    # execute runs the SQL statement\n    cur = g.db.execute('select title, text from entries order by id desc')\n    # build the dicts: the key is title, the value is text; cur.fetchall() collects every returned row\n    entries = [dict(title=row[0], text=row[1]) for row in cur.fetchall()]\n    # 1. cursor(): gets a cursor to operate on\n    # 2. execute(): runs the SQL statement inside the parentheses \n    # 3. fetchall(): returns all records found by the query\n    return render_template('show_entries.html', entries=entries)\n    \n# this view lets a logged-in user add a new entry\n@app.route('/add', methods=['POST'])\ndef add_entry():\n    if not session.get('logged_in'):\n        abort(401)\n    # make sure to use question marks when building SQL statements; otherwise, \n    # building SQL from strings leaves you open to SQL injection.\n    g.db.execute('insert into entries (title, text) values (?, ?)',\n                 [request.form['title'], request.form['text']])\n    g.db.commit()\n    flash('New entry was successfully posted')\n    return redirect(url_for('show_entries'))\n\n# these functions handle login and logout\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    error = None\n    if request.method == 'POST':\n        if request.form['username'] != app.config['USERNAME']:\n            error = 'Invalid username'\n        elif request.form['password'] != app.config['PASSWORD']:\n            error = 'Invalid password'\n        else:\n            session['logged_in'] = True\n            flash('You were logged in')\n            return redirect(url_for('show_entries'))\n    return render_template('login.html', error=error)\n\n# log out\n@app.route('/logout')\ndef logout():\n    session.pop('logged_in', None)\n    flash('You were logged out')\n    return redirect(url_for('show_entries'))\n\n\nif __name__=='__main__':\n    app.run()\n","repo_name":"521xueweihan/LearnFlask","sub_path":"flaskr/flaskr.py","file_name":"flaskr.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
{"seq_id":"72482890153","text":"from pygments.lexers import get_all_lexers\nfrom pygments.styles import STYLE_MAP\n\ntry:\n    from tkinter import *\n    from tkinter import messagebox\n    from tkinter.filedialog import askopenfilename, asksaveasfilename, askdirectory\n    from tkinter import ttk\nexcept ImportError:\n    raise ModuleNotFoundError\nimport os\nimport re\nimport fnmatch\nfrom pysnippetmanager.file_browser import FileBrowser\nfrom pysnippetmanager.text_editor import TextEditor\nfrom pysnippetmanager.snippet import Snippet\n\n\nclass App(object):\n    \"\"\"\n    the gui\n    \"\"\"\n    frame = None\n    snippets = {}\n    groups = {}\n    last_dir = \"~/\"\n    base_dir = \"~/\"\n    lexer_options = None\n    lexers = None\n    lexer_option_menu = None\n    id = 0\n\n    def __init__(self, master):\n        self.init_lexers()\n        self.master = master\n        self.frame = Frame(master)\n        self.frame2 = Frame(master, width=400)\n        self.master.winfo_toplevel().title(\"pySnippetManager\")\n\n        self.text = TextEditor(self.frame)\n        self.tree = FileBrowser(self.frame2, 
app=self)\n\n # create a toolbar\n toolbar = Frame(master)\n\n # select lexer menu\n self.lexer_options = StringVar(toolbar)\n self.lexer_option_menu = ttk.Combobox(toolbar, textvariable=self.lexer_options, values=list(self.lexers.keys()))\n self.lexer_option_menu.bind(\"<>\", self.lexer_selected)\n self.lexer_option_menu.bind(\"\", self.lexer_selected)\n # select highlight style menu\n self.style_options = StringVar(toolbar)\n self.style_option_menu = ttk.Combobox(toolbar, textvariable=self.style_options, values=list(STYLE_MAP.keys()))\n self.style_option_menu.bind(\"<>\", self.style_selected)\n self.style_option_menu.bind(\"\", self.style_selected)\n self.style_option_menu.set(self.text.style_name)\n\n b_new = Button(toolbar, text=\"NEW\", width=6, command=self.new)\n b_save = Button(toolbar, text=\"SAVE\", width=6, command=self.save)\n b_rd = Button(toolbar, text=\"RESCAN DIR\", width=10, command=self.crawler)\n b_quit = Button(toolbar, text=\"QUIT\", width=6, command=self.quit)\n l_lang = Label(toolbar, text=\"Lang\")\n l_style = Label(toolbar, text=\"Style\")\n\n b_new.pack(side=LEFT, padx=2, pady=2)\n b_save.pack(side=LEFT, padx=2, pady=2)\n b_rd.pack(side=LEFT, padx=2, pady=2)\n b_quit.pack(side=LEFT, padx=2, pady=2)\n l_lang.pack(side=LEFT, padx=2, pady=2)\n self.lexer_option_menu.pack(side=LEFT, padx=2, pady=2)\n l_style.pack(side=LEFT, padx=2, pady=2)\n self.style_option_menu.pack(side=LEFT, padx=2, pady=2)\n toolbar.pack(side=TOP, fill=X)\n\n self.tree.pack(side=LEFT, fill=Y, expand=YES)\n\n self.text.pack(side=LEFT, fill=BOTH, expand=YES)\n self.text.insert(\"1.0\", \"\")\n\n self.frame2.pack(side=LEFT, fill=Y, expand=NO)\n self.frame.pack(side=LEFT, fill=BOTH, expand=YES)\n\n # init content\n self.read_config()\n self.crawler()\n\n def init_lexers(self):\n \"\"\"\n get all available lexers\n \"\"\"\n self.lexers = {}\n for lexer in get_all_lexers():\n self.lexers[lexer[1][0]] = lexer\n\n def read_config(self):\n \"\"\"\n read the configuration file\n \"\"\"\n if os.path.exists(os.path.expanduser('~/.pySnippetManager')):\n with open(os.path.expanduser('~/.pySnippetManager'), 'r') as f:\n self.base_dir = f.readline()\n else:\n if self.open_base_dir():\n with open(os.path.expanduser('~/.pySnippetManager'), 'w') as f:\n f.write(self.base_dir)\n\n def crawler(self):\n \"\"\"\n crawl all files in the base directory\n \"\"\"\n for iid in self.snippets.keys():\n self.tree.delete(iid)\n for iid in self.groups.keys():\n self.tree.delete(iid)\n\n self.snippets = {}\n self.groups = {}\n self.id = 0\n\n includes = ['*.pycsm'] # for files only\n includes = r'|'.join([fnmatch.translate(x) for x in includes])\n for root, subdirs, files in os.walk(self.base_dir):\n\n files = [f for f in files if re.match(includes, f)]\n for file in files:\n if '.backup' in root:\n continue\n self.snippets['%d' % self.id] = Snippet(filename=os.path.join(root, file),\n base_dir=self.base_dir)\n self.tree.add_item(iid=self.id,\n label=self.snippets['%d' % self.id].label,\n lexer_alias=self.snippets['%d' % self.id].lexer_alias,\n parent=self.snippets['%d' % self.id].group)\n self.id += 1\n for subdir in subdirs:\n if subdir == '.backup':\n continue\n self.tree.add_group(os.path.join(root, subdir).replace(os.path.expanduser(self.base_dir), ''))\n\n def save(self):\n \"\"\"\n save the current Snippet\n \"\"\"\n if self.text.snippet is not None:\n self.text.snippet.save()\n\n def quit(self):\n if messagebox.askokcancel(\"Quit\", \"Do you really wish to quit?\"):\n self.save()\n self.master.destroy()\n\n def 
close(self):\n \"\"\"\n close the current Snippet\n \"\"\"\n self.text.snippet = None\n\n def open(self):\n \"\"\"\n open a new Snippet\n \"\"\"\n filename = askopenfilename(initialdir=self.last_dir,\n filetypes=((\"Snippet Files\", \"*.pycsm\"),\n (\"all files\", \"*.*\")))\n if filename is None:\n return False\n if len(filename) == 0:\n return False\n self.last_dir = os.path.dirname(os.path.abspath(filename))\n self.text.snippet = Snippet(filename=filename, base_dir=self.base_dir)\n self.lexer_option_menu.set(self.text.snippet.lexer_alias)\n\n def open_base_dir(self):\n \"\"\"\n open the base dir\n \"\"\"\n filename = askdirectory(initialdir=self.base_dir,\n title=\"Select Dir\")\n if filename is None:\n return False\n if len(filename) == 0:\n return False\n self.base_dir = filename\n return True\n\n def new(self, subpath=''):\n \"\"\"\n create a new Snippet\n \"\"\"\n\n filename = asksaveasfilename(initialdir=os.path.join(self.base_dir, subpath[1:]),\n title=\"Select file\",\n filetypes=((\"Snippet Files\", \"*.pycsm\"),\n (\"all files\", \"*.*\")))\n if filename is None:\n return False\n if len(filename) == 0:\n return False\n self.text.snippet = Snippet(filename=filename,\n lexer_alias='text',\n create=True,\n base_dir=self.base_dir)\n self.lexer_option_menu.set(self.text.snippet.lexer_alias)\n self.last_dir = os.path.dirname(os.path.abspath(filename))\n self.snippets['%d' % self.id] = self.text.snippet\n self.tree.add_item(iid=self.id,\n label=self.snippets['%d' % self.id].label,\n lexer_alias=self.snippets['%d' % self.id].lexer_alias,\n parent=self.snippets['%d' % self.id].group)\n self.id += 1\n\n def lexer_selected(self, event=None):\n \"\"\"\n\n \"\"\"\n if self.text.snippet is not None:\n self.text.snippet.lexer = self.lexer_option_menu.get()\n self.lexer_option_menu.set(self.text.snippet.lexer_alias)\n\n def style_selected(self, event=None):\n \"\"\"\n\n \"\"\"\n if self.text.snippet is not None:\n self.text.style = self.style_option_menu.get()\n","repo_name":"trombastic/pySnippetManager","sub_path":"pysnippetmanager/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28193165518","text":"import cairo, copy\nfrom Simulation import ECA\nfrom Bitstring import *\n\neca = ECA(30, 31)\neca.setConf(\"1\", 0)\nsteps = 32\ncellSize = 1\nsurface = cairo.ImageSurface(cairo.FORMAT_RGB24, (eca.x.length * cellSize), (steps * cellSize))\nctx = cairo.Context(surface)\nctx.set_source_rgb(0, 0, 0)\nctx.rectangle(0, 0, (eca.x.length * cellSize), (steps * cellSize))\n\n\nxn = copy.deepcopy(eca.x)\ny = 0\nfor i in range(steps):\n\tprint(xn.bits)\n\tx = 0\n\tfor j in range(xn.length):\n\t\tif(xn.bits[j]):\n\t\t\tctx.set_source_rgb(0, 0, 0)\n\t\t\tctx.rectangle(x, y, cellSize, cellSize)\n\t\t\tctx.fill_preserve()\n\t\t\tctx.set_line_width(0.05)\n\t\t\tctx.stroke()\n\t\telse:\n\t\t\tctx.set_source_rgb(1, 1, 1)\n\t\t\tctx.rectangle(x, y, cellSize, cellSize)\n\t\t\tctx.fill_preserve()\n\t\t\tctx.set_source_rgb(0, 0, 0)\n\t\t\tctx.set_line_width(0.05)\n\t\t\tctx.stroke()\n\t\t\n\t\tx += cellSize\n\t\n\txn = copy.deepcopy(eca.evolve(xn))\n\n\ty += cellSize\n\nsurface.write_to_png(\"test.png\")","repo_name":"amIFabi/Fi-Simulator","sub_path":"src/drawTest.py","file_name":"drawTest.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27126489234","text":"# Given a list of numbers 
and a number k, return whether any two numbers from the list add up to k.\n# For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.\n# Bonus: Can you do this in one pass?\n\ndef addUp(arr, k):\n    for i, x in enumerate(arr):\n        # only search the elements after x, so x is never paired with itself\n        if (k-x) in arr[i+1:]:\n            return [x, (k-x)]\n\n    return []\n\narr = [10, 15, 3, 7]\nk = 17\nresult = addUp(arr, k)\n\nif(len(result) > 0):\n    print(\"True since \" + str(\" + \".join(str(x) for x in result)) + \" is \" + str(k))\n","repo_name":"arthurlbraga/dailycodingproblems","sub_path":"Problem #1 - Easy.py","file_name":"Problem #1 - Easy.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"19291027467","text":"#AUTHOR : JUNA TERES MARTIN\n#PROGRAM : Program to find the average of heights of students using for loop in list\n#DATE : 08-03-2023\n\nheights=input(\"Enter the heights separated by spaces: \").split()\nfor h in range(0,len(heights)):\n    heights[h]=int(heights[h])\nprint(heights)\nsum=0\nfor h in heights:\n    sum+=h\nprint(f\"Average= {round(sum/len(heights))}\")","repo_name":"JunaTeresMartin/Python_Programs","sub_path":"exercises_beginnerLevel/exercise_12_average_height.py","file_name":"exercise_12_average_height.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"35526140","text":"import threading\nimport socket\nimport pickle\nimport json\n\ndef create_socket(port):\n    chat_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    chat_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    chat_socket.bind(('localhost', port))\n    chat_socket.listen(1)\n    return chat_socket\n\ndef server(chat_socket):\n    while True:\n        client_socket, client_address = chat_socket.accept()\n        print(f\"Connection from {client_address}\")\n        threading.Thread(target=handle_client, args=(client_socket,)).start()\n\ndef handle_client(client_socket):\n    while True:\n        data = client_socket.recv(1024)\n        if not data:\n            break\n        \n        received_dict = pickle.loads(data)\n        print(f\"Received message: {received_dict}\")\n    client_socket.close()\n\ndef client(remote_port):\n    while True:\n        message = input(\"Enter message (or 'quit' to exit): \")\n        if message.lower() == \"quit\":\n            break\n\n        message_dict = {\n            (str(1), str(2)): (str(3), message)}\n\n        message_dict[(str(2), str(2))] = (str(5), message)\n\n        message_pickle = pickle.dumps(message_dict)\n\n        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        client_socket.connect(('localhost', remote_port))\n\n        client_socket.sendall(message_pickle)\n        client_socket.close()\n\nif __name__ == '__main__':\n    local_port = int(input(\"Enter local port: \"))\n    remote_port = int(input(\"Enter remote port: \"))\n\n    chat_socket = create_socket(local_port)\n\n    server_thread = threading.Thread(target=server, args=(chat_socket,))\n    server_thread.daemon = True\n    server_thread.start()\n\n    client_thread = threading.Thread(target=client, args=(remote_port,))\n    client_thread.start()\n    client_thread.join()\n\n    server_thread.join()","repo_name":"Priaec/DVRouting","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"7642867479","text":"import numpy as np\nimport cv2\nimport imutils\n\n# load the video\ncap = cv2.VideoCapture(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\TestMovie_10_03\\\\CIMG5910.MOV\")\noutput = 
cv2.imread(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\point_check\\\\range_2019-07-02.JPG\")\n\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\ndivided_width = int(width/30)\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\ndivided_height = int(height/20)\nfirst_line = int(height/15)\nlen_pointB_before = 0\n#ここッ戦犯\n\nfps = cap.get(cv2.CAP_PROP_FPS)\n\nfourcc = cv2.VideoWriter_fourcc(*'DIVX')\nwriter = cv2.VideoWriter(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\out.MOV\", fourcc, fps, (width, height))\n\nOld_cmp = 0\ncount = 0\npointA = []\npointB = []\npointB_fin = []\n\n#一枚ずつフレームを取り出し処理を行う\nwhile True:\n print('count',count)\n # 1フレームずつ取得する。\n ret, frame = cap.read()\n \n #取得した中で最大の輪郭を取得\n Area = []\n largestIndex = 0\n largestArea = 0\n def getLargestContoursAreaIndex(coutours):\n for i in range(0, len(contours)):\n Area.append(cv2.contourArea(contours[i]))\n \n largestIndex = np.argmax(Area)\n largestArea = max(Area)\n if largestArea < 15000:\n return None\n \n return largestIndex\n \n\n image = frame\n \n #フレームが空なら終了\n if frame is None:\n \n print('frame is None err in ',count) \n \n count = count +1\n\n break\n \n \n #ぼかし処理\n gray = cv2.GaussianBlur(image, (15, 15), 0)\n \n #HSV変換\n hsv = cv2.cvtColor(gray, cv2.COLOR_BGR2HSV)\n\n\n # HSV空間で肌色の範囲を定義\n hsv_lower_blue = np.array([0,30,50])\n hsv_upper_blue = np.array([30,150,255])\n \n mask_hsv = cv2.inRange(hsv, hsv_lower_blue, hsv_upper_blue)\n \n #二値化画像を出力\n cv2.imwrite(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\point_check\\\\\" + str(count) + \".JPG\",mask_hsv)\n\n #全ての輪郭を取得\n contours_image, contours, hierarchy = cv2.findContours(mask_hsv, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n #print(contours)\n\n \n #print(getLargestContoursAreaIndex(contours))\n #輪郭が取得できなかったとき表示\n if getLargestContoursAreaIndex(contours) is None:\n \n print('countors is None err in ',count)\n pointA.append(('',''))\n pointB.append(('',''))\n\n count = count +1\n \n continue\n \n\n #輪郭座標(配列)\n # cnt = getLargestContoursAreaIndex(contours)\n\n cnt = contours[getLargestContoursAreaIndex(contours)]\n\n #print([cnt])\n\n output_image = cv2.drawContours(image, [cnt], 0, (0, 0, 255), 1)\n #cv2.imwrite(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\point_check\\\\\" + str(count) + \".JPG\",output_image)\n \n for i in range(0, len(contours)):\n output_all_image = cv2.drawContours(image, contours, i, (255, 0, 0), 1)\n \n output_all_image = cv2.drawContours(output_all_image, [cnt], 0, (0, 0, 255), 1)\n\n cnts = contours_image, [cnt], hierarchy\n\n cnts = imutils.grab_contours(cnts)\n c = max(cnts, key=cv2.contourArea)\n \n #こいつが戦犯\n extBot = tuple(c[c[:, :, 1].argmax()][0])\n #extLeft = tuple(c[c[:, :, 0].argmin()][0])\n cv2.circle(frame,extBot,15,[0,0,255],-1) \n #cv2.circle(frame,extLeft,10,[0,255,0],-1) \n \n #cv2.imwrite(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\point_check\\\\\" + str(count) + \".JPG\",frame)\n\n pointA.append(extBot)\n\n\n #凹凸検出\n hull = cv2.convexHull(cnt,returnPoints = False)\n \n defects = cv2.convexityDefects(cnt,hull)\n \n if defects is None:\n print('defects is None err in ',count)\n pointB.append(('',''))\n\n count = count+1\n \n continue\n\n '''\n #print(defects)\n print('extLeft',extLeft)\n print('extBot',extBot)\n print('hull',hull)\n print('defects',defects)\n '''\n \n i = 0\n root_list = []\n \n \n while i <= defects.shape[0]-1:\n print(i)\n s,e,f,d = defects[i,0]\n start = tuple(cnt[s][0])\n end = tuple(cnt[e][0])\n far = tuple(cnt[f][0])\n \n '''\n print(\"start\",start)\n print(\"end\",end)\n print(\"far\",far)\n \n print(i)\n '''\n \n 
#cv2.line(image,start,end,[0,255,0],2)\n        #cv2.circle(frame,far,5,[255,255,0],-1) \n        #cv2.imwrite(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\point_check\\\\\" + str(count) + \".JPG\",frame)\n        \n        if extBot[0]-70 <= far[0] and far[0] <= extBot[0]+70:\n            #cv2.circle(frame,far,5,[255,0,255],-1) \n            root_list.append(far)\n            i = i + 1\n            \n            \n            #print(\"far\",far)\n            \n            \n        i = i + 1\n        continue\n    \n    #print('root_list',len(root_list))\n\n    \n    pointB_tmp = []\n    #print('len(root_list)',len(root_list))\n    \n    if len(root_list) == 1 :\n        pointB.append(('',''))\n        count = count+1\n\n        continue\n    \n\n    elif len(root_list) == 2:\n        pointB_tmp = (int((root_list[0][0] + root_list[1][0])/2),(int((root_list[0][1] + root_list[1][1])/2)))\n        pointB.append(pointB_tmp)\n        cv2.circle(frame,pointB_tmp,15,[255,0,0],-1) \n        cv2.imwrite(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\point_check\\\\\" + str(count) + \".JPG\",frame)\n        count = count+1\n\n        continue\n    \n    elif len(root_list) == 0 :\n        pointB.append(('',''))\n        print('pointB is 0 in',count)\n        count = count+1\n        \n        continue\n    \n    \n    elif len(root_list) >= 30 :\n        pointB.append(('',''))\n        print('pointB is over in',count)\n        count = count+1\n        \n        continue\n    \n    \n    else:\n        line = 0\n        len_pointB_before = len(pointB)\n        pointB.append(('',''))\n        New_cmp = 0\n        Old_cmp = 0\n        #print('pointB',len(pointB))\n        while line < extBot[1]:\n            above_line = extBot[1] - 120 -line\n            under_line = extBot[1] - 60 -line\n            #print(line)\n            pointB_tmp = []\n            for i in range(0 ,len(root_list)):\n                if root_list[i][1] <= under_line and above_line <= root_list[i][1]:\n                    pointB_tmp.append(root_list[i]) \n            \n            if len(pointB_tmp) == 2:\n                tmp = (int((pointB_tmp[0][0] + pointB_tmp[1][0])/2),int((pointB_tmp[0][1] + pointB_tmp[1][1])/2)) \n                cv2.circle(frame,tmp,15,[255,0,0],-1) \n                cv2.imwrite(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\point_check\\\\\" + str(count) + \".JPG\",frame)\n                pointB.pop(-1)\n                pointB.append(tmp)\n                #print('pointB',len(pointB))\n                New_cmp =len(pointB)\n                \n                \n            if New_cmp > Old_cmp:\n                Old_cmp = New_cmp\n                break\n            \n            if New_cmp <= Old_cmp:\n                line = line + 30\n                continue\n            \n        #pointB_fin.append(pointB[MaxIndex])\n        \n        \n        count = count + 1\n        continue\n    \n    \n    '''\n    line = divided_height\n    while line <= extBot[1]:\n        \n        \n        above_line = extBot[1] - first_line - line\n        under_line = extBot[1] - line\n        \n        pointB_tmp = []\n        for i in range(0 ,len(root_list)):\n            if root_list[i][1] <= under_line and above_line <= root_list[i][1]:\n                \n                pointB_tmp.append(root_list[i]) \n        if len(pointB_tmp) == 2:\n            tmp = (int((pointB_tmp[0][0] + pointB_tmp[1][0])/2),int((pointB_tmp[0][1] + pointB_tmp[1][1])/2))\n            cv2.circle(frame,tmp,10,[255,0,0],-1) \n            cv2.imwrite(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\point_check\\\\\" + str(count) + \".JPG\",frame)\n            pointB.append(tmp)\n            \n        \n        \n        line = line + divided_height\n        continue\n    '''\n    \n    #######################\n    ### where the code below used to live ###\n    #######################\n\n    #print('pointB',pointB_tmp,'in', count)\n    \n    \n    \n    #cv2.circle(image,pointB,15,[255,0,0],-1)\n    #cv2.circle(image,extBot,15,[0,0,255],-1)\n    \n    count = count+1\n    \n    #print(pointA) \n    \n    if not ret:\n        print('finish')\n        break # failed to grab a frame\n    \n    \n\n    \n    \n#print('pointB',pointB) \n#print('pointA',pointA) \n    \noutput = cv2.imread(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\2.JPG\")\npath_A = \"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\pointA_cood.TXT\"\npath_B = \"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\pointB_cood.TXT\"\n\nwith open(path_A,\"w\") as f_A:\n    \n    for k in range(0,len(pointA)):\n        text = str(pointA[k][0]) + \",\" + str(pointA[k][1]) + '\\n'\n        f_A.write(text)\n        if not pointA[k][0] == 
'':\n cv2.circle(output,pointA[k],5,[0,0,255],-1)\n \n #cv2.circle(output,pointA[k],5,[0,0,255],-1)\n #print(pointA[k])\n\n \nwith open(path_B,\"w\") as f_B:\n \n for i in range(0,len(pointB)):\n \n #print('pointB[i]',pointB[i], i)\n text = str(pointB[i][0]) + \",\" + str(pointB[i][1]) + '\\n'\n f_B.write(text)\n if not pointB[i][0] == '':\n cv2.circle(output,pointB[i],5,[255,0,0],-1)\n #print(pointB[i])\n \n#cv2.imwrite(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\PointAandPointB.png\",output)\ncv2.imwrite(\"C:\\\\Users\\\\tuzuk\\\\Desktop\\\\PointAandPointB.png\",output)\n \nprint('width',width)\nprint('height',height)\n \n\n\n''' \n for slide in range(0,height,divided_height):\n \n under_line = extBot[1] - slide\n above_line = extBot[1] - first_line -slide\n \n #print('under_line',under_line)\n #print('above_line',above_line)\n\n \n k=0\n #print(len(root_list)) \n while k <= len(root_list)-1:\n #print('root_list',root_list[k][1])\n #print('under_line_inWhile',under_line)\n #print('above_line_inWhile',above_line)\n #print('k',k)\n if root_list[k][1] <= under_line and above_line <= root_list[k][1]:\n pointB_tmp_tmp = (root_list[k][0],root_list[k][1])\n pointB_tmp.append(pointB_tmp_tmp)\n k = k+1\n\n continue\n \n k = k+1\n continue \n #print('len',len(pointB_tmp))\n if len(pointB_tmp) == 2:\n print('pointB',pointB_tmp)\n pointB.append(pointB_tmp[0])\n #print('pointB',pointB)\n \n \n count = count+1\n continue\n '''\n\n\n\n\n\n \n\n \n ","repo_name":"KariyaSSClubTeam/FingerPencil","sub_path":"FingerRecognizer/FingerRecognizer_ver7.5.py","file_name":"FingerRecognizer_ver7.5.py","file_ext":"py","file_size_in_byte":10885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43532113105","text":"import logging\n\nimport requests\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# SCHEDULER_MODEL = 'django-rate'\nSCHEDULER_MODEL = 'sqlalchemy-rate'\n\n\ndef main():\n req = requests.get(url='http://127.0.0.1:8000/{}/'.format(SCHEDULER_MODEL))\n logger.info('status_code={}'.format(req.status_code))\n logger.info('text={}'.format(req.text))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nsuhara/python-django","sub_path":"common/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69934767272","text":"from pieces import Pawn, Bishop, Rook, Knight, Queen, King\nfrom board import *\nimport random\n\nboard = Board()\n\ndef random_piece(x, y):\n while True:\n rand = random.randint(0, 5)\n\n # Making sure there's no repeats\n if rand == 0 and board.count_pieces(0) >= 8:\n continue\n elif rand == 1 and board.count_pieces(1) >= 2:\n continue\n elif rand == 2 and board.count_pieces(2) >= 2:\n continue\n elif rand == 3 and board.count_pieces(3) >= 2:\n continue\n elif rand == 4 and board.count_pieces(4) >= 1:\n continue\n elif rand == 5 and board.count_pieces(5) >= 1:\n continue\n # Making sure there's no repeats\n\n if rand == 0:\n return Pawn(x, y, board)\n elif rand == 1:\n return Bishop(x, y, board)\n elif rand == 2:\n return Rook(x, y, board)\n elif rand == 3:\n return Knight(x, y, board)\n elif rand == 4:\n return Queen(x, y, board)\n elif rand == 5:\n return King(x, y, board)\n\ndef generate_random_board(n) :\n while (n!=0) :\n x = random.randint(0, 7)\n y = random.randint(0, 7)\n\n #preventing piece stacking\n if (board.get_piece(x, y) != \" \") :\n continue\n #preventing 
piece stacking\n\n board.add_piece(random_piece(x, y))\n n -= 1\n\ndef run_simulation(n) :\n generate_random_board(n)\n for piece in board.all_pieces :\n if (not (piece.is_protected() and piece.is_protecting())) :\n return False, board, 0\n return True, board, board.num_protections()\n\ntry :\n f = open(\"output.txt\", \"r\")\n best_protections = int(f.readlines()[0].strip())\nexcept :\n best_protections = 0\nbest_board = None\n\nwhile (True) :\n board = Board()\n result, board, protections = run_simulation(16)\n if (result and protections > best_protections) :\n best_protections = protections\n best_board = board\n f = open(\"output.txt\", \"w\")\n f.write(str(protections) + \"\\n\" + str(best_board))\n f.close()\n print(\"NEW BEST BOARD FOUND WITH \" + str(best_protections) + \" PROTECTIONS\")\n elif (result) :\n print(\" - Board found with \" + str(protections) + \" protections\")","repo_name":"AronFrish/Frishberg-Chess-Structure","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24428537597","text":"import dash_html_components as html\nimport dash_bootstrap_components as dbc\n\ndef Navbar():\n navbar = dbc.NavbarSimple(\n children=[\n html.A('Versión Experimental', target='_blank', href='189.253.150.229:8085'), \n html.A(html.Img(src='assets/Logo.png', height='40px', width='90'), target='_blank', href='https://florsheimshoes.com.mx')\n ], \n dark=False)\n\n return navbar\n","repo_name":"musoto96/florsheim-dashboard","sub_path":"navbar.py","file_name":"navbar.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38189305043","text":"import argparse\nimport sys\nimport re\nimport gzip\nimport os\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.parse import urlparse\nimport wget\nfrom urllib.request import urlopen\nimport urllib.request\n\n\n# Function to check url validity\ndef check_validity(url):\n try:\n urllib.request.urlopen(url)\n print(\"Valid URL\")\n except IOError:\n print(\"Invalid URL\")\n sys.exit()\n\n\n# Download both txt and txt.gz files from pipermail webpage and return list\ndef get_files(url):\n links = []\n html = urlopen(url).read()\n html_page = bs(html, features=\"lxml\")\n files = []\n base = urlparse(url)\n for link in html_page.find_all('a'):\n current_link = link.get('href')\n\n #Compose gunzip links\n if current_link.endswith('txt.gz'):\n files.append(current_link)\n links.append(base.scheme + \"://\" + base.netloc + base.path + current_link)\n #Compose normal links\n elif current_link.endswith('txt'):\n files.append(current_link)\n links.append(base.scheme + \"://\" + base.netloc + base.path + current_link)\n #Download prepared links\n for link in links:\n try:\n wget.download(link)\n print(\"\\n\" + link + \"\\n\")\n except:\n print(\"\\n Unable to download file, did you forget a trailing /?\\n\")\n #Reverse order so files are chronologically merged\n files.reverse()\n\n return(files)\n\n\n#Gunzip files if necessary and convert files to mbox\ndef mbox_conversion(files, output):\n output_name = output + '.mbox'\n #Extract files and concatenate into mbox format\n with open(output_name, 'w+') as outfile:\n for file in files:\n if file.endswith('.gz'):\n with gzip.open(file, 'rb') as f:\n for line in f:\n outfile.write(line.decode())\n os.remove(file)\n else:\n with open(file, 'rb') as f:\n for line in f:\n 
outfile.write(line.decode())\n                os.remove(file)\n\n    #Replace ' at ' in email headers of the file with @\n    with open(output_name, 'r') as outfile:\n        res = [re.sub(' at ', '@', i, flags=re.IGNORECASE)\n               if 'from' in i[:4].lower() else i for i in outfile.readlines()]\n\n    #Overwrite changes to original file\n    with open(output_name, 'w') as outfile:\n\n        for line in res:\n            outfile.write(line)\n\n\n# Define arguments for script\ndef pass_args():\n\n    parser = argparse.ArgumentParser(description=\"Download a pipermail archive and convert it to mbox\")\n    parser.add_argument('-u', '--url', help=\"Pipermail directory location. Example: http://hostname.tld/directory/. Don't forget the trailing /\")\n    parser.add_argument('-o', '--output', help=\"Output file name (without .mbox)\", default='output')\n    args = parser.parse_args()\n\n    return(args)\n\n\n# Download pipermail archives and convert them to mbox\ndef main():\n    args = pass_args()\n    check_validity(args.url)\n    mbox_conversion(get_files(args.url), args.output)\n\n\nmain()\n","repo_name":"CorneJB/pyperget","sub_path":"pyperget.py","file_name":"pyperget.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"16675376378","text":"import pymel.core as pm\nimport maya.cmds as mc\n\n# region convenience methods rewiring attributes\n\n\ndef attrPassThrough(oldAttr, passInAttr, passOutAttr):\n    \"\"\" Passes oldAttr into the passInAttr and reroutes all oldAttr's output connections\n        to the passOutAttr attribute. Basically rewiring and adding (passInput >--> passOutput)\n        in the middle.\n    \"\"\"\n    oldAttr.connect(passInAttr, force=True)\n\n    for out in oldAttr.outputs(plugs=True):\n        oldAttr.disconnect(out)\n        passOutAttr.connect(out)\n\n\ndef attrReplaceOutputs(oldAttr, newAttr):\n    \"\"\" Reroutes all output connections of `oldAttr` so they come from `newAttr` instead \"\"\"\n    for out in oldAttr.outputs(plugs=True):\n        oldAttr.disconnect(out)\n        newAttr.connect(out)\n\n\ndef attrReplaceInput(oldAttr, newAttr):\n    \"\"\" Reroutes all input connections going into `oldAttr` towards `newAttr` instead \"\"\"\n    for input in oldAttr.inputs(plugs=True):\n        input.disconnect(oldAttr)\n        input.connect(newAttr)\n\n\ndef attrDimensions(attr):\n    if attr.isArray():\n        return attr.numElements()\n    elif attr.isCompound():\n        return attr.numChildren()\n    else:\n        return 1\n\n# endregion\n\n\n# region nodes\ndef plusMinusAverage(*args, **kwargs):\n    from nodex.core import Nodex\n    d = kwargs.pop(\"dimensions\", None)\n    o = kwargs.pop(\"operation\", 1)\n    name = kwargs.pop(\"name\", \"plusMinusAverage\")\n    output = kwargs.pop(\"output3D\", None)\n\n    # Get dimensions from input Nodex\n    if d is None:\n        d = max(x.dimensions() for x in args)\n\n    if d > 3:\n        raise RuntimeError(\"Can't use plusMinusAverage with higher dimensions than 3\")\n\n    # Get corresponding output attribute/length for the current dimension\n    resultAttrs = {1: \"output1D\", 2: \"output2D\", 3: \"output3D\"}\n    resultAttr = resultAttrs[d]\n\n    n = pm.createNode(\"plusMinusAverage\", name=name)\n    n.operation.set(o) # operation: 1=sum, 2=subtract, 3=average\n    for i, v in enumerate(args):\n        n_input_attr = n.attr(\"input{dimension}D[{index}]\".format(dimension=d, index=i))\n        v.connect(n_input_attr)\n\n    result = Nodex(n.attr(resultAttr))\n    if output is not None:\n        result.connect(output)\n\n    return result\n\n\ndef multiplyDivide(input1=None, input2=None, **kwargs):\n    from nodex.core import Nodex\n\n    o = kwargs.pop(\"operation\", 1)\n    setAttr = (('operation', o),)\n    inputs = (('input1', 
input1),\n ('input2', input2))\n\n result = nodeHelper('multiplyDivide', 'output', inputs=inputs, setAttr=setAttr, **kwargs)\n\n return result\n\n # TODO: Old multiplyDivide implementation\n # Ensure nodex inputs\n # if input1 is not None and not isinstance(input1, Nodex):\n # input1 = Nodex(input1)\n # if input2 is not None and not isinstance(input2, Nodex):\n # input2 = Nodex(input2)\n #\n # name = kwargs.pop(\"name\", \"multiplyDivide\")\n #\n # n = pm.createNode(\"multiplyDivide\", name=name)\n # n.operation.set(o) # multiply, divide, pow, etc.\n #\n # d = kwargs.pop(\"dimensions\", None)\n #\n # # Get dimensions from input Nodex\n # if d is None:\n # d = max(x.dimensions() for x in [input1, input2, output] if not x is None)\n #\n # if d > 3:\n # raise RuntimeError(\"Can't use multiplyDivide with higher dimensions than 3\")\n #\n # # Attrs for dimensions\n # input1Attrs = {1: \"input1X\", 2: [\"input1X\", \"input1Y\"], 3: \"input1\"}\n # input1Attr = input1Attrs[d]\n # input2Attrs = {1: \"input2X\", 2: [\"input2X\", \"input2Y\"], 3: \"input2\"}\n # input2Attr = input2Attrs[d]\n # outputAttrs = {1: \"outputX\", 2: [\"outputX\", \"outputY\"], 3: \"output\"}\n # outputAttr = outputAttrs[d]\n #\n # for attr, inputValue in [(input1Attr, input1),\n # (input2Attr, input2)]:\n # if inputValue is not None:\n # inputValue.connect(n.attr(attr))\n #\n # if output is not None:\n # Nodex(n.attr(outputAttr)).connect(output)\n #\n # return Nodex(n.attr(outputAttr))\n\n\ndef nodeHelper(nodeType, chainAttr, inputs=(), outputs=(), minimalOutput=True, setAttr=(), **kwargs):\n from nodex.core import Nodex\n\n # ensure nodex\n inputs = tuple((attrName, attrValue) if isinstance(attrValue, Nodex) else (attrName, Nodex(attrValue))\n for attrName, attrValue in inputs if attrValue is not None)\n\n outputs = tuple((attrName, attrValue) if isinstance(attrValue, Nodex) else (attrName, Nodex(attrValue))\n for attrName, attrValue in outputs if attrValue is not None)\n\n # highest dimensions among inputs\n dim = max(y.dimensions() for x, y in inputs)\n\n # create node\n createKwargs = {}\n name = kwargs.pop('name', None)\n if name:\n createKwargs['name'] = name\n\n n = pm.createNode(nodeType, **createKwargs)\n\n for attrName, attrValue in setAttr:\n n.attr(attrName).set(attrValue) # without nodex (optimization for static values)\n\n # check input dimensions\n for attrName, attrValue in inputs:\n attrNodex = Nodex(n.attr(attrName))\n if dim < attrNodex.dimensions():\n if dim == 1:\n attrNodex = attrNodex[0]\n else:\n attrNodex = attrNodex[:dim]\n attrValue.connect(attrNodex)\n\n # result chain\n result = Nodex(n.attr(chainAttr))\n\n # if dimensions mismatch give resulting dimensions lowest dimensions (if minimalOutput)\n if minimalOutput and dim != 0:\n if result.dimensions() > dim:\n if dim == 1:\n result = result[0]\n else:\n result = result[:dim]\n\n return result\n\n\n\ndef clamp(input=None, min=None, max=None, output=None, **kwargs):\n # TODO: Revise utils.clamp to work with the new Nodex class\n\n name = kwargs.pop(\"name\", \"clamp\")\n suffices = [\"R\", \"G\", \"B\"]\n\n n = pm.createNode(\"clamp\", name=name)\n\n if input is not None:\n connectOrSetVector(n, \"input\", input, suffices=suffices)\n\n if min is not None:\n connectOrSetVector(n, \"min\", min, suffices=suffices)\n\n if max is not None:\n connectOrSetVector(n, \"max\", max, suffices=suffices)\n\n if output is not None:\n connectOutputVector(n, \"output\", output, suffices=suffices)\n\n return Nodex(n.attr('output'))\n\n\ndef 
doubleLinear(input1=None, input2=None, output=None, nodeType=\"multDoubleLinear\", **kwargs):\n    from nodex.core import Nodex\n\n    name = kwargs.pop(\"name\", nodeType)\n    n = pm.createNode(nodeType, name=name)\n\n    if input1 is not None:\n        Nodex(input1).connect(n.attr('input1'))\n\n    if input2 is not None:\n        Nodex(input2).connect(n.attr('input2'))\n\n    if output is not None:\n        Nodex(n.attr('output')).connect(output)\n\n    return Nodex(n.attr(\"output\"))\n\n\ndef condition(firstTerm=None, secondTerm=None, ifTrue=None, ifFalse=None, output=None, **kwargs):\n    from nodex.core import Nodex\n    o = kwargs.pop(\"operation\", 0)\n    name = kwargs.pop(\"name\", \"condition\")\n    d = kwargs.pop(\"dimensions\", None)\n\n    # Ensure inputs are Nodex\n    if firstTerm is not None and not isinstance(firstTerm, Nodex):\n        firstTerm = Nodex(firstTerm)\n    if secondTerm is not None and not isinstance(secondTerm, Nodex):\n        secondTerm = Nodex(secondTerm)\n    if ifTrue is not None and not isinstance(ifTrue, Nodex):\n        ifTrue = Nodex(ifTrue)\n    if ifFalse is not None and not isinstance(ifFalse, Nodex):\n        ifFalse = Nodex(ifFalse)\n\n    # Get dimensions from input Nodex\n    if d is None:\n        d = max(x.dimensions() for x in [firstTerm, secondTerm, ifTrue, ifFalse] if not x is None)\n\n    if d > 3:\n        raise RuntimeError(\"Can't use condition with higher dimensions than 3\")\n\n    suffices = [\"R\", \"G\", \"B\"]\n\n    n = pm.createNode(\"condition\", name=name)\n    n.operation.set(o)\n\n    # region define output attribute\n    # Get corresponding output attribute/length for the current dimension\n    outputAttrs = {1: \"outColorR\", 2: [\"outColorR\", \"outColorG\"], 3: \"outColor\"}\n    outputAttr = outputAttrs[d]\n    # use node name plus attribute to identify output so we can use it as the nodex directly\n    if isinstance(outputAttr, list):\n        outputAttr = [\"{0}.{1}\".format(n.name(), x) for x in outputAttr]\n    else:\n        outputAttr = \"{0}.{1}\".format(n.name(), outputAttr)\n    # endregion\n\n    if firstTerm is not None:\n        firstTerm.connect(n.attr(\"firstTerm\"))\n\n    if secondTerm is not None:\n        secondTerm.connect(n.attr(\"secondTerm\"))\n\n    if ifTrue is not None:\n        ifTrue.connect(n.attr(\"colorIfTrue\"))\n    else:\n        n.attr(\"colorIfTrue\").set((1, 1, 1))\n\n    if ifFalse is not None:\n        ifFalse.connect(n.attr(\"colorIfFalse\"))\n    else:\n        n.attr(\"colorIfFalse\").set((0, 0, 0))\n\n    if output is not None:\n        # connect the condition node's result to the requested output\n        Nodex(outputAttr).connect(output)\n\n    return Nodex(outputAttr)\n\n# endregion\n\n\ndef ensurePluginsLoaded(plugins):\n    for p in plugins:\n        mc.loadPlugin(p, quiet=True)","repo_name":"BigRoy/nodex","sub_path":"nodex/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8888,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"}
{"seq_id":"14200404221","text":"import pygame\nimport os\nimport math\n\nfrom app.sprites.enemy.enemyCollision import EnemyCollision\nfrom app.sprites.enemy.enemyAttack import EnemyAttack\nfrom app.tools.animation import Animation\nfrom app.AI.steeringAI import SteeringAI\nfrom app.sprites.collisionMask import CollisionMask\nfrom app.tools.imageBox import *\nfrom app.sprites.GUI.lifeBar import LifeBar\n\nfrom app.settings import *\n\nclass EnemyWalk(EnemyCollision):\n    def __init__(self, x, y, mapData=None): # ?\n        super().__init__(x, y)\n\n        self.name = \"enemyWalk\"\n\n        self.imageEnemy = rectSurface((ENEMY_DIMX, ENEMY_DIMY), PURPLE, 2)\n        self.attackingEnemy = rectSurface((ENEMY_DIMX, ENEMY_DIMY), RED, 2)\n\n        self.enemyFrames = [self.imageEnemy]\n        
self.attackingFrames = [self.attackingEnemy]\n        self.animation = Animation(self, self.enemyFrames, 100)\n\n        self.rect = self.imageEnemy.get_rect()\n        self.rect.x = x\n        self.rect.y = y\n        self.x = x\n        self.y = y\n\n        self.speedx = 0\n        self.speedy = 0\n\n        self.setMapData(mapData)\n\n        # if self.mapData.currentLevel >= 5:\n        #     self.maxSpeedx = 1.4\n        #     self.maxSpeedy = 1.4\n        # else:\n        self.maxSpeedx = 1\n        self.maxSpeedy = 1\n\n        self.isPhysicsApplied = True\n        self.isCollisionApplied = True\n\n        # self.soundDead = pygame.mixer.Sound(os.path.join('music_pcm', 'Punch2.wav'))\n        # self.soundDead.set_volume(1)\n\n        self.AI = SteeringAI(self.mapData, self.rect, self.maxSpeedx, self.maxSpeedy)\n        self.collisionMask = CollisionMask(self.rect.x, self.rect.y, self.rect.width, self.rect.height)\n\n        self.attackDMG = 1\n\n        self.mode = WALKING\n        self.timerAttack = 0\n        self.timeToAttack = 20\n        self.timeInAttack = 5\n        self.distanceToAttack = 25\n        self.attackSprite = None\n        self.factorAttack = 1.5\n\n        self.maxHealth = 5\n        if self.mapData.currentLevel >= 5:\n            self.maxHealth = 7\n\n        super().generateLifeBar(self.maxHealth)\n\n        self.bounty = 10\n\n\n    def applyAI(self):\n\n        if self.mode == IN_ATTACK:\n            if self.timerAttack == 0:\n                self.timerAttack += 1\n\n                # create an invisible sprite that damages the player\n                valueX = float(self.image.get_width())*self.factorAttack\n                valueY = float(self.image.get_height())*self.factorAttack\n                positionX = float(self.rect.x)-(valueX-self.image.get_width())/2\n                positionY = float(self.rect.y)-(valueY-self.image.get_height())/2\n                self.attackSprite = EnemyAttack(positionX, positionY, (valueX, valueY), self.attackDMG)\n                self.attackSprite.setMapData(self.mapData)\n\n                # do an animation\n                self.animation = Animation(self, self.attackingFrames, 100)\n\n            elif self.timerAttack < self.timeInAttack:\n                self.timerAttack += 1\n            else:\n                self.timerAttack = 0\n                self.mode = WALKING\n                self.AI = SteeringAI(self.mapData, self.rect, self.maxSpeedx, self.maxSpeedy)\n                self.attackSprite.kill()\n                self.animation = Animation(self, self.enemyFrames, 100)\n\n        elif self.mode == PREPARE_ATTACK:\n            if self.timerAttack < self.timeToAttack:\n                self.timerAttack += 1\n            else:\n                self.timerAttack = 0\n                self.mode = IN_ATTACK\n\n        else:\n            # if player is close : stop and init timer to attack\n            distX = self.mapData.player.rect.x-self.rect.x\n            distY = self.mapData.player.rect.y-self.rect.y\n            if math.sqrt(distX**2 + distY**2) < self.distanceToAttack:\n                self.prepareAttack()\n            else:\n                steeringX, steeringY = self.AI.getAction()\n\n                self.speedx += steeringX\n                self.speedy += steeringY\n\n    def update(self):\n        self.capSpeed()\n        self.x += self.speedx\n        self.y += self.speedy\n        self.rect.x = self.x\n        self.rect.y = self.y\n\n        super().update()\n\n    def capSpeed(self):\n        if self.speedx > self.maxSpeedx:\n            self.speedx = self.maxSpeedx\n        if self.speedx < -self.maxSpeedx:\n            self.speedx = -self.maxSpeedx\n        if self.speedy > self.maxSpeedy:\n            self.speedy = self.maxSpeedy\n        if self.speedy < -self.maxSpeedy:\n            self.speedy = -self.maxSpeedy\n\n    def prepareAttack(self):\n        self.mode = PREPARE_ATTACK\n        self.timerAttack = 0\n        self.speedx = 0\n        self.speedy = 0\n\n    def attackOnCollision(self):\n        self.prepareAttack()\n","repo_name":"Benoit-Pouliot/LD37","sub_path":"app/sprites/enemy/enemyWalk.py","file_name":"enemyWalk.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"34999930852","text":"#To do exercise 2 I must first do exercise 1, since I need to create the rows and
the columns first\nimport sys\nfrom pyparsing import col\n\nif len(sys.argv) == 3:\n    filas = int(sys.argv[1])\n    columnas = int(sys.argv[2])\n\n    if filas < 1 or filas > 9 or columnas < 1 or columnas > 9:\n        print(\"Error\")\n    else:\n        for i in range(filas):\n            print(\" \")\n            for c in range(columnas):\n                print(\" * \", end='')\nelse:\n    print(\"Error\")","repo_name":"carmenm02/EvaluacionTema1","sub_path":"ejercicio4.py","file_name":"ejercicio4.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"3171955760","text":"import argparse\nimport sys\n\n\nclass ArgumentParserSCDG:\n    # TODO customize for other tools\n    def __init__(self, tool_scdg):\n        self.parser = argparse.ArgumentParser(description=\"SCDG module arguments\")\n        #self.parser._optionals.title = \"SCDG module arguments\"\n        \n        self.group_expl = self.parser.add_mutually_exclusive_group() # required=True\n        self.group_expl.title = 'SCDG exploration techniques used'\n        self.group_expl.add_argument(\n            \"--DFS\",\n            help=\"TODO\",\n            action=\"store_true\",\n            \n        )\n        self.group_expl.add_argument(\n            \"--BFS\",\n            help=\"TODO\",\n            action=\"store_true\",\n            \n        )\n        self.group_expl.add_argument(\n            \"--CDFS\",\n            help=\"TODO\",\n            action=\"store_true\",\n            \n        )\n        self.group_expl.add_argument(\n            \"--ThreadCDFS\",\n            help=\"TODO\",\n            action=\"store_true\",\n            \n        )\n        self.group_expl.add_argument(\n            \"--CBFS\",\n            help=\"TODO\",\n            action=\"store_true\",\n            \n        )\n        self.group_expl.add_argument(\n            \"--DBFS\",\n            help=\"TODO\",\n            action=\"store_true\",\n            \n        )\n        self.group_expl.add_argument(\n            \"--SDFS\",\n            help=\"TODO\",\n            action=\"store_true\",\n            \n        )\n        self.group_expl.add_argument(\n            \"--SCDFS\",\n            help=\"TODO\",\n            action=\"store_true\",\n            \n        )\n        \n        self.group_output = self.parser.add_mutually_exclusive_group() # required=True\n        self.group_output.title = 'Format to save graph output'\n        self.group_output.add_argument(\n            \"--gs\",\n            help=\".GS format\",\n            action=\"store_true\",\n            \n        )\n        self.group_output.add_argument(\n            \"--json\",\n            help=\".JSON format\",\n            action=\"store_true\",\n            \n        )\n        \n        self.group_unpacked = self.parser.add_mutually_exclusive_group() # required=True\n        self.group_unpacked.title = 'Unpacking method (iff --packed)'\n        self.group_unpacked.add_argument(\n            \"--symbion\",\n            help=\"Concolic unpacking method (linux | windows [in progress])\",\n            action=\"store_true\",\n            \n        )\n        self.group_unpacked.add_argument(\n            \"--unipacker\",\n            help=\"Emulation unpacking method (windows only)\",\n            action=\"store_true\",\n            \n        )\n        \n        self.group_packed = self.parser.add_argument_group('Packed malware')\n        self.group_packed.add_argument(\n            \"--packed\",\n            help=\"Is the binary packed ?
(default : False)\",\n action=\"store_true\",\n \n )\n self.group_packed.add_argument(\n \"--concrete_target_is_local\",\n action=\"store_true\",\n help=\"Use a local GDB server instead of using cuckoo (default : False)\",\n \n )\n \n self.group_expl_param = self.parser.add_argument_group('SCDG exploration techniques parameters')\n self.group_expl_param.add_argument(\n \"--symb_loop\",\n help=\"Number of iteration allowed for a symbolic loop (default : 3) \",\n default=3,\n type=int,\n )\n self.group_expl_param.add_argument(\n \"--limit_pause\",\n help=\"Number of states allowed in pause stash (default : 200)\",\n default=200,\n type=int,\n )\n self.group_expl_param.add_argument(\n \"--max_step\",\n help=\"Maximum number of steps allowed for a state (default : 50 000)\",\n default=50000,\n type=int,\n )\n self.group_expl_param.add_argument(\n \"--max_deadend\",\n help=\"Number of deadended state required to stop (default : 600)\",\n default=600,\n type=int,\n )\n self.group_expl_param.add_argument(\n \"--simul_state\",\n help=\"Number of simultaneous states we explore with simulation manager (default : 5)\",\n default=5,\n type=int,\n )\n \n self.group_bin = self.parser.add_argument_group('Binary parameters')\n self.group_bin.add_argument(\n \"--n_args\",\n help=\"Number of symbolic arguments given to the binary (default : 0)\",\n default=0,\n type=int,\n )\n self.group_bin.add_argument(\n \"--conc_loop\",\n help=\"Number of symbolic arguments given to the binary (default : 1024)\",\n default=10240,\n type=int,\n )\n \n self.group_rats= self.parser.add_argument_group('RATs custom parameters')\n self.group_rats.add_argument(\n \"--count_block\",\n help=\"Count block (default : False)\",\n action=\"store_true\",\n \n )\n self.group_rats.add_argument(\n \"--sim_file\",\n help=\"Create SimFile with binary TODO (default : False)\",\n action=\"store_true\",\n \n )\n self.group_rats.add_argument(\n \"--track_command\",\n help=\"Track command loop of RATs (default : False)\",\n action=\"store_true\",\n \n )\n self.group_rats.add_argument(\n \"--ioc_report\",\n help=\"produces and IoC report (default : False)\",\n action=\"store_true\",\n \n )\n self.group_rats.add_argument(\n \"--hooks\",\n help=\"activates the hooks for time-consuming functions (default : False)\",\n action=\"store_true\",\n \n )\n \n self.group_scdg = self.parser.add_argument_group('SCDG creation parameter')\n self.group_scdg.add_argument(\n \"--min_size\",\n help=\"Minimum size required for a trace to be used in SCDG (default : 3)\",\n default=3,\n type=int,\n )\n self.group_scdg.add_argument(\n \"--disjoint_union\",\n help=\"Do we merge traces or use disjoint union ? (default : merge)\",\n action=\"store_true\",\n \n )\n self.group_scdg.add_argument(\n \"--not_comp_args\",\n help=\"Do we compare arguments to add new nodes when building graph ? (default : comparison enabled)\",\n action=\"store_true\",\n \n ) \n self.group_scdg.add_argument(\n \"--three_edges\",\n help=\"Do we use the three-edges strategy ? (default : False)\",\n action=\"store_true\",\n \n )\n self.group_scdg.add_argument(\n \"--not_ignore_zero\",\n help=\"Do we ignore zero when building graph ? 
(default : Discard zero)\",\n action=\"store_true\",\n \n )\n self.group_scdg.add_argument(\n \"--dir\",\n help=\" Directory to save outputs graph for gspan (default : output/runs/)\",\n default=\"output/runs/\",\n )\n self.group_scdg.add_argument(\n \"--discard_SCDG\",\n help=\"Do not keep intermediate SCDG in file (default : False)\",\n action=\"store_false\",\n \n )\n self.group_scdg.add_argument(\n \"--eval_time\",\n help=\"Keep intermediate SCDG in file (default : False)\",\n action=\"store_true\",\n \n )\n \n self.groupt = self.parser.add_argument_group('Thread parameter')\n self.groupt.add_argument(\n \"--pre_run_thread\",\n help=\"TODO (default : False)\",\n action=\"store_true\",\n ) \n self.groupt.add_argument(\n \"--runtime_run_thread\",\n help=\"TODO (default : False)\",\n action=\"store_true\",\n ) \n self.groupt.add_argument(\n \"--post_run_thread\",\n help=\"TODO (default : False)\",\n action=\"store_true\",\n ) \n \n self.group = self.parser.add_argument_group('Global parameter')\n self.group.add_argument(\n \"--approximate\",\n help=\"Symbolic approximation (default : False)\",\n action=\"store_true\",\n ) \n self.group.add_argument(\n \"--timeout\",\n help=\"Timeout in seconds before ending extraction (default : 200)\",\n default=1000,\n type=int,\n ) \n self.group.add_argument(\n \"--not_resolv_string\",\n help=\"Do we try to resolv references of string (default : False)\",\n action=\"store_true\",\n \n )\n self.group.add_argument(\n \"--exp_dir\",\n help=\" Directory to save SCDG extracted (default : output/runs/)\",\n default=\"output/runs/\",\n \n )\n self.group.add_argument(\n \"--memory_limit\",\n help=\"Skip binary experiment when memory > 90%% (default : False)\",\n action=\"store_true\",\n \n )\n self.group.add_argument(\n \"--verbose_scdg\",\n help=\"Verbose output during calls extraction (default : False)\",\n action=\"store_true\",\n \n )\n self.group.add_argument(\n \"--debug_error\",\n help=\"Debug error states (default : False)\",\n action=\"store_true\",\n \n )\n self.group.add_argument(\n \"--familly\",\n help=\"Familly of the malware (default : unknown)\",\n \n )\n self.group.add_argument(\"binary\", \n help=\"Name of the binary to analyze\",\n )\n self.group.add_argument(\n \"--sthread\",\n help=\"Number of thread used (default: 1)\",\n type=int,\n default=1,\n )\n \n self.tool_scdg = tool_scdg\n\n def update_tool(self,args):\n inputs = args.binary\n if not self.tool_scdg.print_on:\n self.tool_scdg.print_on = args.verbose\n self.tool_scdg.debug_error = args.debug_error\n expl_method = \"DFS\" if args.DFS else \\\n (\"BFS\" if args.BFS \\\n else (\"CDFS\" if args.CDFS \\\n else (\"DBFS\" if args.DBFS \\\n else (\"SDFS\" if args.SDFS \\\n else (\"SCDFS\" if args.SCDFS \\\n else (\"ThreadCDFS\" if args.ThreadCDFS \\\n else \"CBFS\"))))))\n\n familly = \"unknown\"\n if args.familly:\n familly = args.familly\n args.exp_dir = args.exp_dir + familly + \"/\"\n\n if args.timeout:\n self.tool_scdg.timeout = args.timeout\n if args.symb_loop:\n self.tool_scdg.jump_it = args.symb_loop\n if args.conc_loop:\n self.tool_scdg.loop_counter_concrete = args.conc_loop\n if args.eval_time:\n self.tool_scdg.eval_time = True\n\n self.tool_scdg.max_simul_state = args.simul_state\n\n self.tool_scdg.string_resolv = not args.not_resolv_string\n if args.limit_pause:\n self.tool_scdg.max_in_pause_stach = args.limit_pause\n if args.max_step:\n self.tool_scdg.max_step = args.max_step\n if args.max_deadend:\n self.tool_scdg.max_end_state = args.max_deadend\n \n self.tool_scdg.is_packed 
= args.packed\n\n        if args.concrete_target_is_local:\n            self.tool_scdg.concrete_target_is_local = True\n        \n        if self.tool_scdg.is_packed:\n            if args.unipacker:\n                mode = \"unipacker\"\n                self.tool_scdg.log.info(\"Unpack with %s\",mode)\n                self.tool_scdg.unpack_mode = mode\n            elif args.symbion:\n                mode = \"symbion\"\n                self.tool_scdg.log.info(\"Unpack with %s\",mode)\n                self.tool_scdg.unpack_mode = mode\n\n        self.tool_scdg.inputs = inputs\n        self.tool_scdg.expl_method = expl_method\n        self.tool_scdg.familly = familly\n        self.tool_scdg.memory_limit = args.memory_limit # Add custom value\n\n    def parse_arguments(self, allow_unk = False, args_list=None):\n        args = None\n        if not allow_unk:\n            args = self.parser.parse_args(args_list)\n        else:\n            args, unknown = self.parser.parse_known_args(args_list)\n\n        return args\n","repo_name":"csvl/SEMA-ToolChain","sub_path":"src/SemaSCDG/helper/ArgumentParserSCDG.py","file_name":"ArgumentParserSCDG.py","file_ext":"py","file_size_in_byte":12410,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"}
{"seq_id":"6194840565","text":"def solution(s):\n    answer = True \n    s = list(s)\n    if 4 == len(s) or len(s) == 6:\n        answer = True\n        for i in range(len(s)):\n            # replace i with the character's ASCII code value\n            i = ord(s[i])\n            # if the code is outside the ASCII digit range it is not a number;\n            # otherwise keep going\n            if ord('0') <= i <= ord('9'):\n                continue\n            else:\n                answer = False\n    else:\n        answer = False\n    return answer","repo_name":"sjoongh/algorithm-study","sub_path":"python/프로그래머스/문자열 다루기 기본.py","file_name":"문자열 다루기 기본.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38576678678","text":"__doc__=\"\"\"cpqFcaHostCntlr\n\ncpqFcaHostCntlr is an abstraction of a HP FC Host Bus Adapter.\n\n$Id: cpqFcaHostCntlr.py,v 1.1 2010/06/30 16:21:39 egor Exp $\"\"\"\n\n__version__ = \"$Revision: 1.1 $\"[11:-2]\n\nfrom HPExpansionCard import *\n\nclass cpqFcaHostCntlr(HPExpansionCard):\n    \"\"\"FCA Host Bus Adapter object\"\"\"\n\n    model = \"\"\n    FWRev = \"\"\n    ROMRev = \"\"\n    wwpn = \"\"\n    wwnn = \"\"\n\n    statusmap ={1: (DOT_GREY, SEV_WARNING, 'other'),\n                2: (DOT_GREEN, SEV_CLEAN, 'Ok'),\n                3: (DOT_RED, SEV_CRITICAL, 'Failed'),\n                4: (DOT_RED, SEV_CRITICAL, 'Shutdown'),\n                5: (DOT_ORANGE, SEV_ERROR, 'Loop Degraded'),\n                6: (DOT_RED, SEV_CRITICAL, 'Loop Failed'),\n                7: (DOT_ORANGE, SEV_ERROR, 'Not Connected'),\n                }\n\n    # we monitor FC Host Controllers\n    monitor = True\n\n    _properties = HPExpansionCard._properties + (\n        {'id':'model', 'type':'string', 'mode':'w'},\n        {'id':'FWRev', 'type':'string', 'mode':'w'},\n        {'id':'ROMRev', 'type':'string', 'mode':'w'},\n        {'id':'wwpn', 'type':'string', 'mode':'w'},\n        {'id':'wwnn', 'type':'string', 'mode':'w'},\n        )\n\n    factory_type_information = (\n        {\n            'id'             : 'cpqFcaHostCntlr',\n            'meta_type'      : 'cpqFcaHostCntlr',\n            'description'    : \"\"\"Arbitrary device grouping class\"\"\",\n            'icon'           : 'ExpansionCard_icon.gif',\n            'product'        : 'ZenModel',\n            'factory'        : 'manage_addCpqFcaHostCntlr',\n            'immediate_view' : 'viewCpqFcaHostCntlr',\n            'actions'        :\n            (\n                { 'id'            : 'status'\n                , 'name'          : 'Status'\n                , 'action'        : 'viewCpqFcaHostCntlr'\n                , 'permissions'   : (ZEN_VIEW,)\n                },\n                { 'id'            : 'perfConf'\n                , 'name'          : 'Template'\n                , 'action'        : 'objTemplates'\n                , 'permissions'   : (ZEN_CHANGE_DEVICE, )\n                },\n                { 'id'            : 'viewHistory'\n                , 'name'          : 'Modifications'\n                , 'action'        : 'viewHistory'\n                , 'permissions'   : (ZEN_VIEW_MODIFICATIONS,)\n                },\n            )\n          },\n        
)\n\nInitializeClass(cpqFcaHostCntlr)\n","repo_name":"zenoss/Community-Zenpacks","sub_path":"ZenPacks.community.HPMon/ZenPacks/community/HPMon/cpqFcaHostCntlr.py","file_name":"cpqFcaHostCntlr.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"13285454697","text":"\"\"\"\nGiven an integer array nums, return all the different possible non-decreasing subsequences of the given array with at least two elements. You may return the answer in any order.\n\nExample 1:\nInput: nums = [4,6,7,7]\nOutput: [[4,6],[4,6,7],[4,6,7,7],[4,7],[4,7,7],[6,7],[6,7,7],[7,7]]\n\nExample 2:\nInput: nums = [4,4,3,2,1]\nOutput: [[4,4]]\n\nConstraints:\n1 <= nums.length <= 15\n-100 <= nums[i] <= 100\n\nanalysis:\nbacktracking\nTC: O(2^n * n)\nSC: O(2^n * n)\n\"\"\"\nfrom typing import List\n\n\nclass NonDecreasingSubsequences:\n def findSubsequences(self, nums: List[int]) -> List[List[int]]:\n res = set()\n arr = []\n\n def dfs(idx):\n if idx == len(nums):\n if len(arr) >= 2:\n res.add(tuple(arr))\n return\n if not arr or arr[-1] <= nums[idx]:\n arr.append(nums[idx])\n dfs(idx + 1)\n arr.pop()\n dfs(idx + 1)\n\n dfs(0)\n return res\n","repo_name":"DeanHe/Practice","sub_path":"LeetCodePython/NonDecreasingSubsequences.py","file_name":"NonDecreasingSubsequences.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73668716713","text":"#!/usr/bin/env python\n\nimport cProfile\n\nITERATIONS = 40\n\nclass ExtndedPolymerization:\n\n def __init__(self, polymer_template, element_pairs):\n self.polymer_template = polymer_template\n self.element_pairs = element_pairs\n self.magnitude_map = element_pairs.copy()\n self.base_counts_map = element_pairs.copy()\n self.counts_map = {char: 0 for char in set(list(self.polymer_template) + [char for string in self.element_pairs.values() for char in string])}\n\n\n def base_cases(self, iterations):\n for key, polymer in self.magnitude_map.items():\n self.magnitude_map[key] = self.element_pairs[key]\n for i in range(int(iterations / 2) - 1):\n self.magnitude_map[key] = self.polymeraze(self.magnitude_map[key])\n\n \n def base_count(self):\n for template in self.element_pairs:\n counts_map = {char: 0 for char in set(list(self.polymer_template) + [char for string in self.element_pairs.values() for char in string])}\n for char in self.magnitude_map[template]:\n counts_map[char] += 1\n\n self.base_counts_map[template] = counts_map\n\n\n\n def count(self, chain):\n for index, template in enumerate(self.templates(chain)):\n for key, value in self.base_counts_map[template].items():\n self.counts_map[key] += value\n if index > 0:\n self.counts_map[template[0]] -= 1\n\n\n def chain(self):\n polymers = []\n for index, template in enumerate(self.templates()):\n polymers.append(self.magnitude_map[template][1:] if index > 0 else self.magnitude_map[template])\n\n return \"\".join(polymers)\n\n\n def chain_polymers(self, iterations):\n self.base_cases(iterations)\n self.base_count()\n self.count(self.chain())\n print(self.max_minus_min())\n\n\n # def print_chain(self):\n # polymers = []\n # for index, template in enumerate(self.templates(self.chain())):\n # component = (self.magnitude_map[template])[1:] if index > 0 else self.magnitude_map[template]\n # polymers.append(component)\n\n # return \"\".join(polymers)\n\n\n\n def polymeraze(self, polymer):\n blocks = [polymer[0]]\n for i in range(len(polymer) 
- 1):\n blocks.append(self.element_pairs[polymer[i : i + 2]][-2:])\n\n return \"\".join(blocks)\n\n\n\n def templates(self, polymer_template=None):\n polymer_template = polymer_template or self.polymer_template\n return [\n chunk_0 + chunk_1\n for chunk_0, chunk_1 in zip(polymer_template, polymer_template[1:])\n ]\n\n\n def max_minus_min(self):\n max = {\"max\": 0}\n min = {\"min\": 999999999999999}\n for char, count in self.counts_map.items():\n if count > list(max.values())[0]:\n max = {char: count}\n\n if count < list(min.values())[0]:\n min = {char: count}\n\n return list(max.values())[0] - list(min.values())[0]\n\n @staticmethod\n def parse_lines(lines):\n polymer_template = \"\"\n pairs = []\n for line in lines:\n if \"->\" in line:\n pairs.append(line.split(\"->\"))\n if not polymer_template:\n polymer_template = line\n\n element_pairs = {\n pair[0].strip(): pair[0].strip()[0] + pair[1].strip() + pair[0].strip()[1]\n for pair in pairs\n }\n return [polymer_template, element_pairs]\n\n\n\nif __name__ == \"__main__\":\n with open(\"./input.txt\", \"r\") as fs:\n lines = fs.read().split(\"\\n\")\n\n # lines=\"\"\"NNCB\n\n # CH -> B\n # HH -> N\n # CB -> H\n # NH -> C\n # HB -> C\n # HC -> B\n # HN -> C\n # NN -> C\n # BH -> H\n # NC -> B\n # NB -> B\n # BN -> B\n # BB -> N\n # BC -> B\n # CC -> N\n # CN -> C\"\"\".split(\"\\n\")\n\n polymer_template, element_pairs = ExtendedPolymerization.parse_lines(lines)\n polymerizer = ExtendedPolymerization(polymer_template, element_pairs)\n\n\n cProfile.run(\"polymerizer.chain_polymers(ITERATIONS)\")\n","repo_name":"blauentag/AdventOfCode","sub_path":"2021/day_14/long_chain_polymerization.py","file_name":"long_chain_polymerization.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3587049568","text":"# -*- coding: utf-8 -*-\nclass Student(object):\n def __init__(self, name, gender):\n self.__name = name\n self.__gender = gender\n def get_gender(self):\n return self.__gender\n def set_gender(self,gender):\n self.__gender = gender\n\n# Test:\nbart = Student('Bart', 'male')\nif bart.get_gender() != 'male':\n print('Test failed!')\nelse:\n bart.set_gender('female')\n if bart.get_gender() != 'female':\n print('Test failed!')\n else:\n print('Test passed!')\n","repo_name":"Vancheung/TestEngineering","sub_path":"_1_Programming/Python/Python_liaoxuefeng/_10_PrivateValue.py","file_name":"_10_PrivateValue.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
+{"seq_id":"31928714806","text":"from app.main import db\nfrom flask_restplus import abort\n\n\ndef get_all_messiers_catalogues(object):\n \"\"\"fetch all objects in the data table\"\"\"\n messiers = object.query.all()\n return messiers\n\n\ndef get_a_messier_catalogue(object, messier_id):\n \"\"\"fetch an object in the data table by id\"\"\"\n messier = object.query.filter_by(id=messier_id).first()\n return messier\n\n\ndef create_a_messier_catalogue(object, data):\n \"\"\"\n insert new data into the data table\n\n Parameters:\n object (model): data model\n data (dict): data object\n \"\"\"\n messier = object.query.filter_by(id=data['id']).first()\n if not messier:\n try:\n new_messier = object(**data)\n save_changes(new_messier)\n return {\"message\": \"Created a new messier successfully\"}, 201\n except Exception as e:\n print(e)\n abort(400, \"Cannot create a new messier\")\n else:\n return {\"message\": f\"Messier {data['id']} already exists\"}, 
200\n\n\ndef update_a_messier_in_catalogue(object, data):\n \"\"\"\n update data in the database\n\n Parameters:\n object (model): data model\n data (dict): data object\n \"\"\"\n messier = object.query.filter_by(id=data['id']).first()\n if messier:\n try:\n result = object.query.filter(object.id == data['id']).update(data)\n db.session.commit()\n if result:\n return {\"message\": \"Updated successfully\"}, 202\n else:\n return {\"message\": \"Update failed\"}, 400\n except Exception as e:\n print(e)\n abort(400, \"Cannot update messier\")\n else:\n return {\"message\": f\"Messier {data['id']} does not exist\"}, 400\n\n\ndef delete_a_messier_in_catalogue(object, messier_id):\n \"\"\"\n delete a row in the data table by id\n\n Parameters:\n object (model): data model\n messier_id: messier id\n \"\"\"\n messier = object.query.filter_by(id=messier_id).first()\n if messier:\n try:\n result = object.query.filter(object.id == messier_id).delete()\n db.session.commit()\n if result:\n return {\"message\": \"Deleted successfully\"}, 200\n else:\n return {\"message\": \"Delete failed\"}, 400\n except Exception as e:\n print(e)\n abort(400, \"Cannot delete messier\")\n else:\n return {\"message\": f\"Messier {messier_id} does not exist\"}, 400\n\n\ndef save_changes(data):\n \"\"\"validate data and commit it to the database\"\"\"\n db.session.add(data)\n db.session.commit()\n","repo_name":"nguyenkhacbaoanh/Messier-Catalogue","sub_path":"app/main/services/messier_services.py","file_name":"messier_services.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"72393042152","text":"input = __import__('sys').stdin.readline\n\ntc = int(input())\nchange = [\n [36, 37, 38, 18, 19, 20, 45, 46, 47, 27, 28, 29],\n [42, 43, 44, 24, 25, 26, 51, 52, 53, 33, 34, 35],\n [6, 7, 8, 44, 41, 38, 11, 10, 9, 45, 48, 51],\n [2, 1, 0, 53, 50, 47, 15, 16, 17, 36, 39, 42],\n [0, 3, 6, 35, 32, 29, 9, 12, 15, 18, 21, 24],\n [8, 5, 2, 26, 23, 20, 17, 14, 11, 27, 30, 33]\n]\ncol = list('wyrogb')\ncaid = list('UDFBLR')\nidxs = dict()\nfor i, ele in enumerate(caid):\n idxs[ele] = i\n\n\ndef rotate(idx):\n ret = change[idx]\n tmp = [ans[ret[i]] for i in range(12)]\n for i in range(12):\n ans[ret[i]] = tmp[(i + 3) % 12]\n ret = cube[idx]\n temp = [[0] * 3 for _ in range(3)]\n for i in range(3):\n for j in range(3):\n temp[j][2 - i] = ans[ret[i][j]]\n for i in range(3):\n for j in range(3):\n ans[ret[i][j]] = temp[i][j]\n\n\nfor _ in range(tc):\n cube = [[[i * 9 + 3 * j + k for k in range(3)] for j in range(3)] for i in range(6)]\n\n ans = [col[i // 9] for i in range(54)]\n n = int(input())\n ary = input().split()\n for cmd in ary:\n idx, clock = idxs[cmd[0]], cmd[1]\n rotate(idx)\n if clock == '-':\n rotate(idx)\n rotate(idx)\n for ele in cube[0]:\n ele = [ans[x] for x in ele]\n print(''.join(ele))\n","repo_name":"112224/algorithm","sub_path":"python3/5373 큐빙.py","file_name":"5373 큐빙.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30862006158","text":"\nimport numpy as np\nfrom flask import Flask, Blueprint, jsonify, request, render_template, flash, redirect, url_for\nimport os\nimport pandas as pd\nimport random\nfrom flask_login import login_required, current_user\nfrom datetime import datetime\nfrom app.capture_frames_run import frameCaptureRun\nfrom app.capture_frames_squat import frameCaptureSquat\nimport cv2\nimport 
gspread\nimport time\nfrom boto3 import session\n\nviews = Blueprint('views',__name__)\n\nsheet_id = '1YhmeuynXcs9Bo-Sl26HxsIkkrAbHdPMClxFh_nYVPvk'\n\n@views.route('/upload', methods=['GET', 'POST'])\ndef upload_vid():\n if request.method == 'GET':\n return render_template('upload.html',user=current_user)\n elif request.method == 'POST':\n ACCESS_ID = 'QPHTN5KR6NLVV6JRNPMG'\n SECRET_KEY = '6fXDDDfMtWPPNUBDJDYnwH8Xouh66mi0OLBZTbus8cA'\n\n sess = session.Session()\n client = sess.client('s3',\n region_name='ams3',\n endpoint_url='https://ams3.digitaloceanspaces.com',\n aws_access_key_id=ACCESS_ID,\n aws_secret_access_key=SECRET_KEY)\n\n if request.form['vid_type'] == 'squat':\n capture = frameCaptureSquat(request.form['frame'], request.form['side-direction'], request.form['height'], True)\n elif request.form['vid_type'] == 'run':\n capture = frameCaptureRun(request.form['frame'], request.form['side-direction'], request.form['height'], True)\n\n\n capture.init_metrics()\n vid_bytes = request.files['video'].read()\n with open('temp.mp4', \"wb\") as f:\n f.write(vid_bytes)\n uuid = capture.run('temp.mp4')\n if uuid == -1:\n message = 'Please upload a video that is less than 10 seconds!'\n return render_template('upload.html',user=current_user, message=message)\n\n # Upload a file to your Space\n client.upload_file('temp.mp4', # Path to local file\n 'pose-app', # Name of Space\n f'videos/{uuid}.mp4') # Name for remote file\n os.remove('temp.mp4')\n message='Success!'\n \n return message,request.form \n\n@views.route('/', methods=['GET', 'POST'])\n@login_required\ndef save_img():\n main_sheet = 'main'\n url_main = f'https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet={main_sheet}'\n \n logs_sheet = 'labels_log'\n url_logs = f'https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet={logs_sheet}'\n\n try:\n df_meta = pd.read_csv(url_main)\n except pd.errors.EmptyDataError as e:\n return 'Please upload a video at /upload' \n\n try:\n df_logs = pd.read_csv(url_logs)\n except pd.errors.EmptyDataError as e:\n df_logs = pd.DataFrame()\n \n index_list = list(range(len(df_meta)))\n \n df_logs['criteria'] = df_logs.apply(lambda x: x['user_email']+'-'+x['image_name'].split('/')[-1],axis=1)\n while True:\n if len(index_list) != 0:\n index = random.choice(index_list) ###check whether its squat or running\n if len(df_logs) == 0:\n break\n if current_user.email+'-'+df_meta.at[index,'filename'] in df_logs['criteria'].values:\n index_list.remove(index)\n else:\n break\n else:\n return 'You have no more frames to label'\n\n if request.method == 'GET':\n img_choice = df_meta.loc[index,'filename']\n img_frame = df_meta.loc[index,'frame']\n # do_dir = 'https://pose-app.ams3.digitaloceanspaces.com/static'\n img = os.path.join(img_frame,img_choice)\n print(url_for('static',filename=img))\n metadata = df_meta[df_meta['filename']==img_choice]\n metadata.dropna(inplace=True, axis=1)\n metadata = metadata.iloc[0]\n metadata = metadata.to_dict()\n return render_template('index.html', metadata = metadata, img_name=img, user=current_user)\n\n if request.method == 'POST':\n # index = random.randint(0,len(df_meta)-1) ###check whether its squat or running\n date = datetime.now().strftime(\"%Y/%m/%d-%H:%M:%S\")\n img_choice = df_meta.loc[index,'filename']\n img_frame = df_meta.loc[index,'frame']\n img = os.path.join(img_frame,img_choice)\n\n metadata = df_meta[df_meta['filename']==img_choice]\n metadata.dropna(inplace=True, axis=1)\n metadata = metadata.iloc[0]\n metadata = 
metadata.to_dict()\n form = request.form\n\n sa = gspread.service_account(filename=\"newagent-pss9-9047ed9cb1b8.json\")\n sh = sa.open_by_url('https://docs.google.com/spreadsheets/d/1YhmeuynXcs9Bo-Sl26HxsIkkrAbHdPMClxFh_nYVPvk')\n\n ### Labels\n labels_sheet = 'labels'\n url_labels = f'https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet={labels_sheet}'\n\n try:\n df_labels = pd.read_csv(url_labels)\n except pd.errors.EmptyDataError as e:\n df_labels = pd.DataFrame(columns=['index','img_name'])\n # if 'labels.csv' in os.listdir():\n # df_labels = pd.read_csv('labels.csv')\n # else:\n # df_labels = pd.DataFrame(columns=['index','img_name'])\n if form['img_name'] not in df_labels['img_name'].values:\n index = len(df_labels.index)+1\n if len(df_labels.index) == 0:\n df_labels.at[index,'index'] = index\n else:\n df_labels.at[index,'index'] = df_labels.at[index-2,'index']+1\n df_labels.at[index,'date'] = date\n df_labels.at[index,'video_type'] = form['video_type']\n df_labels.at[index,'frame'] = form['frame']\n df_labels.at[index,'side_direction'] = form['side_direction']\n for metric in form:\n df_labels.at[index, metric] = form[metric]\n df_labels.at[index,'no_labels'] = 1\n df_labels.at[index,'label_list'] = str([form['score']])\n else:\n index = df_labels[df_labels['img_name']==form['img_name']].index[0]\n # df_labels.at[index,'index'] = index\n # df_labels.at[index,'date'] = date\n # df_labels.at[index,'video_type'] = form['video_type']\n # df_labels.at[index,'frame'] = form['frame']\n # df_labels.at[index,'side_direction'] = form['side_direction']\n # df_labels.at[index,'img_name'] = form['img_name']\n label_list = eval(df_labels.at[index, 'label_list'])\n label_list.append(form['score'])\n df_labels.at[index, 'label_list'] = str(label_list)\n df_labels.at[index,'no_labels']+=1\n score = 0\n for single_score in eval(df_labels.at[index, 'label_list']):\n score+=int(single_score)\n df_labels.at[index,'score'] = score/int(df_labels.at[index,'no_labels'])\n \n \n df_labels.at[index,'score'] = round(float(df_labels.at[index,'score']),2)\n df_labels.at[index,'score_int'] = round(df_labels.at[index,'score'])\n df_labels = df_labels.fillna('')\n labels_wks = sh.worksheet('labels')\n labels_wks.update([df_labels.columns.values.tolist()] + df_labels.values.tolist())\n # df_labels.to_csv('labels.csv',index=False)\n\n\n ### Logs\n # if 'label_log.csv' in os.listdir():\n # df_logs = pd.read_csv('label_log.csv')\n # else:\n # df_logs = pd.DataFrame()\n logs_sheet = 'labels_log'\n url_logs = f'https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet={logs_sheet}'\n\n try:\n df_logs = pd.read_csv(url_logs)\n except pd.errors.EmptyDataError as e:\n df_logs = pd.DataFrame()\n\n index = len(df_logs.index)+1\n date = datetime.now().strftime(\"%Y/%m/%d-%H:%M:%S\")\n user_id = current_user.id\n user_email = current_user.email\n username = current_user.first_name\n \n chosen_filepath = df_meta[df_meta['filename']==form['img_name'].split('/')[1]]\n video_type = chosen_filepath['video_type'].values[0]\n frame_angle = chosen_filepath['frame'].values[0]\n side_direction = chosen_filepath['side_direction'].values[0]\n \n df_logs.at[index,'index'] = index\n df_logs.at[index,'date'] = date\n df_logs.at[index,'user_id'] = user_id\n df_logs.at[index,'user_email'] = user_email\n df_logs.at[index,'username'] = username\n df_logs.at[index,'image_name'] = form['img_name']\n df_logs.at[index,'video_type'] = video_type\n df_logs.at[index,'frame_angle'] = frame_angle\n 
df_logs.at[index,'side_direction'] = side_direction\n df_logs.at[index,'score'] = int(form['score'])\n\n df_logs = df_logs.fillna('')\n logs_wks = sh.worksheet('labels_log')\n logs_wks.update([df_logs.columns.values.tolist()] + df_logs.values.tolist())\n return render_template('index.html', metadata = metadata, img_name=img, user=current_user)\n","repo_name":"pardeep4561/python-flask","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9057,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"7869171884","text":"#!/usr/bin/env python\n\n#-*- coding: utf-8 -*-\nimport os\n\ndef add2group():\n\tdir=raw_input('please input the name of new group:')\n\tp=open(dir,'a')\n\tf=open('list.list','r')\n\tfilename=raw_input('please input the filename you want to add to the group:')\n\tfor line in f.readlines():\n\t\tif filename in os.path.splitext(os.path.basename(line))[0]:\n\t\t\tp.write(line)\n\tp.close()\n\tf.close()\n\nif __name__=='__main__':\n\tadd2group()\n","repo_name":"xiyoulinux/xybook","sub_path":"test/add2group.py","file_name":"add2group.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10999904183","text":"from django.shortcuts import redirect\nfrom django_unicorn.components import UnicornView\n\nfrom crosswordApp.Classes.Crossword import Crossword\nfrom crosswordApp.Classes.User import User\nfrom pymongo import MongoClient\n\nimport json\n\nclient = MongoClient(\"mongodb+srv://Group20:Group20@cluster0.vl47pk0.mongodb.net/?retryWrites=true&w=majority\")\ndb = client['CrossWordManagement']\ncollections = db['crosswordApp_crossword']\n\n\nclass CreatecrosswordmanualView(UnicornView):\n username = \"\"\n\n title = \"\"\n description = \"\"\n\n rows: str = '0'\n cols: str = '0'\n grid = []\n characters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',\n 'U', 'V', 'W', 'X', 'Y', 'Z', '_']\n activeChar = '_'\n activeStep = 1\n roadMapStatusLabel1 = \"\"\n roadMapStatusLabel2 = \"Empty\"\n roadMapStatusLabel3 = \"Empty\"\n roadMapStatusLabel4 = \"Empty\"\n roadMapStatusLabel5 = \"Empty\"\n\n wordsHorizontalStart = []\n wordsVerticalStart = []\n wordsHorizontal = []\n wordsVertical = []\n\n horClue1 = \"\"\n horClue2 = \"\"\n horClue3 = \"\"\n horClue4 = \"\"\n horClue5 = \"\"\n horClue6 = \"\"\n horClue7 = \"\"\n horClue8 = \"\"\n horClue9 = \"\"\n horClue10 = \"\"\n horClue11 = \"\"\n horClue12 = \"\"\n horClue13 = \"\"\n horClue14 = \"\"\n horClue15 = \"\"\n horClues = {}\n verClue1 = \"\"\n verClue2 = \"\"\n verClue3 = \"\"\n verClue4 = \"\"\n verClue5 = \"\"\n verClue6 = \"\"\n verClue7 = \"\"\n verClue8 = \"\"\n verClue9 = \"\"\n verClue10 = \"\"\n verClue11 = \"\"\n verClue12 = \"\"\n verClue13 = \"\"\n verClue14 = \"\"\n verClue15 = \"\"\n verClues = {}\n currentStep:str = 'Enter Title and Description'\n message_content: str = \"\"\n message_type = \"\"\n displayMessage: str = \"no\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(**kwargs)\n username = kwargs.get(\"username\")\n\n\n def incrementStep(self):\n if self.activeStep == 1:\n if self.title == \"\":\n self.message_content = \"Please enter a title\"\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n return\n if self.description == \"\":\n self.message_content = \"Please enter a description\"\n self.message_type = \"errorMessage\"\n self.displayMessage 
= \"yes\"\n return\n self.activeStep += 1\n if self.activeStep == 2:\n self.roadMapStatusLabel2 = \"\"\n self.displayMessage = \"no\"\n self.currentStep = \"Generate Grid\"\n if self.activeStep == 3:\n self.roadMapStatusLabel3 = \"\"\n self.displayMessage = \"no\"\n self.currentStep = \"Fill Grid\"\n if self.activeStep == 4:\n self.roadMapStatusLabel4 = \"\"\n self.displayMessage = \"no\"\n self.currentStep = \"Enter Clues\"\n if self.activeStep == 5:\n self.roadMapStatusLabel5 = \"\"\n self.currentStep = \"Saved Successfully\"\n self.displayMessage = \"no\"\n print(self.activeStep)\n\n def roadMapBack(self):\n if self.activeStep == 2:\n self.roadMapStatusLabel2 = \"Empty\"\n if self.activeStep == 3:\n self.roadMapStatusLabel3 = \"Empty\"\n if self.activeStep == 4:\n self.roadMapStatusLabel4 = \"Empty\"\n if self.activeStep == 5:\n self.roadMapStatusLabel5 = \"Empty\"\n if self.activeStep > 1:\n self.activeStep -= 1\n\n def create_crossword(self):\n print(self.title)\n print(self.description)\n print(self.rows)\n print(self.cols)\n\n def createGrid(self):\n print(\"Create grid called\")\n gridRow = []\n self.grid = []\n print(type(self.rows))\n print(type(self.cols))\n\n # if self.rows == '':\n # self.message_content = \"Please enter a number of rows\"\n # self.message_type = \"errorMessage\"\n # self.displayMessage = \"yes\"\n # return\n #\n # if self.cols == '':\n # self.message_content = \"Please enter a number of columns\"\n # self.message_type = \"errorMessage\"\n # self.displayMessage = \"yes\"\n # return\n\n if not self.rows.isnumeric():\n self.message_content = \"Number of rows must be positive integer\"\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n return\n if not self.cols.isnumeric():\n self.message_content = \"Number of columns must ba positive integer\"\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n return\n\n if type(self.rows) != int:\n print(\"rows is not an int\")\n rowsInt = int(self.rows)\n print(\"number of rows is: \" + str(rowsInt))\n if rowsInt<3:\n self.message_content = \"Number of rows must be greater than 3\"\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n return\n if rowsInt>15:\n self.message_content = \"Number of rows must be less than 15\"\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n return\n\n if type(self.cols) != int:\n colsInt = int(self.cols)\n print(\"number of cols is: \" + str(colsInt))\n if colsInt<3:\n self.message_content = \"Number of columns must be greater than 3\"\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n return\n if colsInt>15:\n self.message_content = \"Number of columns must be less than 15\"\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n return\n\n rowsInt = int(self.rows)\n colsInt = int(self.cols)\n print(type(self.rows))\n print(type(self.cols))\n\n\n for i in range(0, rowsInt):\n for j in range(0, colsInt):\n gridRow.append('_')\n self.grid.append(gridRow)\n gridRow = []\n print(self.grid)\n\n self.incrementStep()\n\n def cellClicked(self, cell):\n rowIndex, colIndex = cell[0], cell[1]\n print(\"Cell clicked at: \", rowIndex, colIndex)\n self.grid[rowIndex][colIndex] = self.activeChar\n\n def setActiveChar(self, clickChar):\n print(\"Active char: \", clickChar)\n self.activeChar = clickChar\n print(\"Active char: \", self.activeChar)\n\n def getWordVerticalHelper(self, start):\n print(\"Get word vertical helper called\")\n print(\"Start: \", start)\n word = \"\"\n for i in 
range(start[0], int(self.rows)):\n if self.grid[i][start[1]] != '_':\n word += self.grid[i][start[1]]\n else:\n break\n return word\n\n def getWordHorizontalHelper(self, start):\n print(\"Get word horizontal helper called\")\n print(\"Start: \", start)\n word = \"\"\n for i in range(start[1], int(self.cols)):\n if self.grid[start[0]][i] != '_':\n word += self.grid[start[0]][i]\n else:\n break\n return word\n\n def getWordOrient(self):\n self.wordsHorizontalStart = []\n self.wordsVerticalStart = []\n self.wordsHorizontal = []\n self.wordsVertical = []\n rowsInt = int(self.rows)\n colsInt = int(self.cols)\n for i in range(0, int(self.rows)):\n for j in range(0, int(self.cols)):\n if self.grid[i][j] != '_':\n if i == 0 and j == 0:\n if self.grid[i][j + 1] != '_':\n self.wordsHorizontalStart.append([i, j])\n if self.grid[i + 1][j] != '_':\n self.wordsVerticalStart.append([i, j])\n elif i == 0:\n if j-1 >=0 and self.grid[i][j - 1] == '_' and j+1=0 and self.grid[i - 1][j] == '_' and i+1 < rowsInt and self.grid[i + 1][j] != '_':\n self.wordsVerticalStart.append([i, j])\n if self.grid[i][j + 1] != '_':\n self.wordsHorizontalStart.append([i, j])\n else:\n if j-1>=0 and self.grid[i][j - 1] == '_' and j+1=0 and self.grid[i - 1][j] == '_' and i+1=0:\n if self.grid[i-1][j]!='_':\n hasAdjacent = True\n if j+1=0:\n if self.grid[i][j-1]!='_':\n hasAdjacent = True\n\n if hasAdjacent == False:\n self.message_content = f\"Cell {i+1}, {j+1} is not adjacent to any other cell\"\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n return\n\n if self.wordsHorizontal == [] and self.wordsVertical == []:\n self.message_content = \"At least two cells must be adjacent\"\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n return\n\n self.incrementStep()\n\n def getClues(self):\n\n if self.verClue1 != \"\":\n self.verClues['0'] = self.verClue1\n if self.verClue2 != \"\":\n self.verClues['1'] = self.verClue2\n if self.verClue3 != \"\":\n self.verClues['2'] = self.verClue3\n if self.verClue4 != \"\":\n self.verClues['3'] = self.verClue4\n if self.verClue5 != \"\":\n self.verClues['4'] = self.verClue5\n\n if self.horClue1 != \"\":\n self.horClues['0'] = self.horClue1\n if self.horClue2 != \"\":\n self.horClues['1'] = self.horClue2\n if self.horClue3 != \"\":\n self.horClues['2'] = self.horClue3\n if self.horClue4 != \"\":\n self.horClues['3'] = self.horClue4\n if self.horClue5 != \"\":\n self.horClues['3'] = self.horClue5\n\n self.verClues = dict(sorted(self.verClues.items()))\n self.horClues = dict(sorted(self.horClues.items()))\n\n\n print(\"Username in get clues: \", self.username)\n print(\"Get clues called\")\n print(\"Title: \", self.title)\n print(\"Description: \", self.description)\n print(\"Rows: \", self.rows)\n print(\"Columns: \", self.cols)\n print(\"Grid: \", self.grid)\n print(\"Words horizontal: \", self.wordsHorizontal)\n print(\"Words vertical: \", self.wordsVertical)\n print(\"Words horizontal start: \", self.wordsHorizontalStart)\n print(\"Words vertical start: \", self.wordsVerticalStart)\n print(\"Vertical clues: \", self.verClues)\n print(\"Horizontal clues: \", self.horClues)\n newCrossword = Crossword(self.username, self.title, self.description, int(self.rows), int(self.cols), self.verClues,self.horClues, self.wordsHorizontal, self.wordsVertical, self.grid, self.wordsHorizontalStart, self.wordsVerticalStart)\n print(\"New crossword: \", newCrossword)\n\n if newCrossword.check():\n collections.insert_one(newCrossword.__dict__)\n self.message_content = 
\"Crossword saved successfully\"\n self.message_type = \"successMessage\"\n self.displayMessage = \"yes\"\n self.incrementStep()\n else:\n self.message_content = newCrossword.message\n self.message_type = \"errorMessage\"\n self.displayMessage = \"yes\"\n\n def hideMessage(self):\n print(\"hide message called\")\n self.displayMessage = \"no\"\n\n def goHome(self):\n print(\"go home called\")\n return redirect(\"/home\")\n\n def createAnother(self):\n print(\"create another called\")\n return redirect(\"/create_crossword_manual/\")","repo_name":"Rudra3012/IT314_Project_20","sub_path":"code/IT314_Project_20/crosswordApp/components/createCrosswordManual.py","file_name":"createCrosswordManual.py","file_ext":"py","file_size_in_byte":13427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33708889557","text":"def calcute_GCD():\r\n global GCD\r\n a = int(input(\"Enter a: \"))\r\n b = int(input(\"Enter b: \"))\r\n if a <= 0 or b<=0:\r\n print(\"a and b cannot be zero or negative. Try again \")\r\n calcute_GCD()\r\n if a > b:\r\n Min = b\r\n else:\r\n Min = a\r\n for i in range(1, Min + 1):\r\n if (a % i == 0) & (b % i == 0):\r\n GCD = i\r\n print(\"GCD: \", GCD)\r\n e = input(\"If you want to continue, enter, 'yes', otherwise enter 'no' :\")\r\n if e == \"yes\":\r\n calcute_GCD()\r\n if e == \"no\":\r\n exit()\r\ncalcute_GCD()\r\n","repo_name":"AliFateminia/Practice_Python_04","sub_path":"Practice_4_7.py","file_name":"Practice_4_7.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28198166407","text":"import sys\nsys.path.append(\"./PyTorchEMD/\")\nimport glob\nimport os\nimport re\nimport time\nimport torch\nimport graphx\nfrom sat2lidar_dataset import Sat2LidarDataset\nfrom torch.utils.tensorboard import SummaryWriter\nimport neuralnet_pytorch.gin_nnt as gin\nfrom graphx.networks import *\nfrom sat2lidar_model import Sat2LidarNetwork\nimport engine\n\ndef get_graphx_model(args):\n config_file = args.config\n gin.external_configurable(CNN18Encoder, 'cnn18_enc')\n gin.external_configurable(PointCloudEncoder, 'pc_enc')\n gin.external_configurable(PointCloudDecoder, 'pc_dec')\n gin.external_configurable(PointCloudResDecoder, 'pc_resdec')\n gin.external_configurable(PointCloudResGraphXDecoder, 'pc_resgraphxdec')\n gin.external_configurable(PointCloudResGraphXUpDecoder, 'pc_upresgraphxdec')\n gin.external_configurable(PointCloudResLowRankGraphXUpDecoder, 'pc_upreslowrankgraphxdec')\n\n @gin.configurable('GraphX')\n def create_model(name, img_enc, pc_enc, pc_dec, adain=True, projection=True, decimation=None, color_img=True, n_points=250, bs=4, checkpoint_folder=None):\n if decimation is not None:\n pc_dec = partial(pc_dec, decimation=decimation)\n print(\"Number of points = \", n_points)\n net = PointcloudDeformNet((bs,) + (3 if color_img else 1, 224, 224), (bs, n_points, 3), img_enc, pc_enc, pc_dec, adain=adain, projection=projection, weight_decay=None)\n\n mon.model_name = name\n mon.set_path('./graphx_weight/')\n states = mon.load('training.pt', method='torch', map_location='cpu')\n if states != None:\n net.load_state_dict(states['model_state_dict'])\n return net, n_points\n \n gin.parse_config_file(config_file)\n net, n_points = create_model()\n return net, n_points\n\ndef collate_fn(batch):\n return tuple(zip(*batch))\n\ndef log_loss_info(writer, mode, losses, epoch):\n for loss, val in losses.items():\n 
writer.add_scalar(\"log/Loss/{}_{}\".format(mode, loss), val, epoch)\n\ndef main(args):\n\n batch_size = 4\n device = \"cuda:\"+args.gpu\n print(\"\\ndevice: {}\".format(device))\n alpha = torch.tensor(1).to(device)\n graphx_model, n_points = get_graphx_model(args)\n graphx_model = graphx_model.to(device)\n torch.cuda.empty_cache()\n model = Sat2LidarNetwork(graphx_model, n_points, device).to(device)\n\n print('Args: ', args)\n\n # ---------------------- prepare data loader ------------------------------- #\n \n _d_train = Sat2LidarDataset(image_dir = os.path.join(args.data_dir, os.path.join('train', 'image_filtered')), pc_num = n_points, ann_dir = os.path.join(args.data_dir, os.path.join('train', 'annotation')), mode='train')\n _d_val = Sat2LidarDataset(image_dir = os.path.join(args.data_dir, os.path.join('val', 'image_filtered')), pc_num = n_points, ann_dir = os.path.join(args.data_dir, os.path.join('val', 'annotation')), mode='val')\n \n d_train = torch.utils.data.DataLoader(_d_train, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn)\n d_val = torch.utils.data.DataLoader(_d_val, batch_size=1, shuffle=True, num_workers=4, collate_fn=collate_fn)\n \n # -------------------------------------------------------------------------- #\n \n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=0) \n milestones = [200, 800, 1300, 2000]\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.3)\n \n start_epoch = 0\n best_eval_loss = 99999999\n prefix, ext = os.path.splitext(args.ckpt_path)\n\n ckpts = glob.glob(prefix + \"_best\" + \"-*\" + ext)\n ckpts.sort(key=lambda x: int(re.search(r\"-(\\d+){}\".format(ext), os.path.split(x)[1]).group(1)))\n if ckpts:\n checkpoint = torch.load(ckpts[-1], map_location=device) # load last checkpoint\n model.load_state_dict(checkpoint[\"model\"])\n del checkpoint\n torch.cuda.empty_cache()\n best_eval_loss = engine.evaluate(model, d_val, device, alpha, False, args)['total_loss']\n\n ckpts = glob.glob(prefix + \"-*\" + ext)\n ckpts.sort(key=lambda x: int(re.search(r\"-(\\d+){}\".format(ext), os.path.split(x)[1]).group(1)))\n if ckpts:\n checkpoint = torch.load(ckpts[-1], map_location=device) # load last checkpoint\n model.load_state_dict(checkpoint[\"model\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n start_epoch = checkpoint[\"epochs\"]\n lr_scheduler.load_state_dict(checkpoint[\"lr_scheduler\"])\n del checkpoint\n torch.cuda.empty_cache()\n\n since = time.time()\n print(\"\\nalready trained: {} epochs; to {} epochs\".format(start_epoch, args.epochs))\n \n # ------------------------------- train ------------------------------------ #\n writer = SummaryWriter()\n\n for epoch in range(start_epoch, args.epochs):\n print(\"\\nepoch: {}\".format(epoch + 1))\n best = False\n \n A = time.time()\n\n train_losses = engine.train_one_epoch(model, optimizer, d_train, device, epoch, alpha, args)\n\n A = time.time() - A\n\n if epoch > args.warmup_epochs:\n lr_scheduler.step()\n\n print('Train loss: ', train_losses)\n\n log_loss_info(writer, 'train', train_losses, epoch)\n \n B = time.time()\n eval_losses = engine.evaluate(model, d_val, device, alpha, False, args)\n B = time.time() - B\n\n if best_eval_loss > eval_losses['total_loss']:\n best_eval_loss = eval_losses['total_loss']\n best = True\n\n trained_epoch = epoch + 1\n print(\"training: {:.2f} s, evaluation: {:.2f} s\".format(A, B))\n print('Validation loss: ', 
eval_losses)\n\n log_loss_info(writer, 'val', eval_losses, epoch)\n\n engine.save_ckpt(model, optimizer, lr_scheduler, trained_epoch, args.ckpt_path, eval_info=str(eval_losses), best=best)\n \n # -------------------------------------------------------------------------- #\n writer.close()\n print(\"\\ntotal time of this training: {:.2f} s\".format(time.time() - since))\n if start_epoch < args.epochs:\n print(\"already trained: {} epochs\\n\".format(trained_epoch))\n \nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--gpu\", type=str, default='0')\n parser.add_argument(\"--config\", default = \"./configs/graphx.gin\")\n parser.add_argument(\"--use-cuda\", action=\"store_true\")\n parser.add_argument(\"--dataset\", default=\"sat2lidar\")\n parser.add_argument(\"--data-dir\", default=\"./datasets/extended_dataset/aggregated/splitted\")\n parser.add_argument(\"--ckpt-path\")\n parser.add_argument(\"--results\", default=\"./results/val.res\")\n parser.add_argument(\"--warmup-epochs\", type=int, default=1)\n parser.add_argument(\"--lr\", type=float, default=0.0001)\n parser.add_argument(\"--epochs\", type=int, default=2000)\n parser.add_argument(\"--print-freq\", type=int, default=100, help=\"frequency of printing losses\")\n args = parser.parse_args()\n \n if args.ckpt_path is None:\n args.ckpt_path = \"./graphx_{}.pth\".format(args.dataset)\n if args.results is None:\n args.results = os.path.join(os.path.dirname(args.ckpt_path), \"results.pth\")\n \n main(args)\n","repo_name":"pittcps/sat2pc","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"34330357657","text":"import os\nimport sys\nimport json\nimport time\nimport copy\n\nsys.path.append(sys.path[-1] + os.sep + 'core')\nprint(sys.path)\n\nfrom library import *\nfrom full_connected import *\nfrom convolution import *\nfrom max_pooling import *\n# from ensemble import *\nfrom loader_images import *\n\n\nos.system('color')\n\nendc = '\\033[0m'\nreset = '\\033[0m'\nbold = '\\033[01m'\ndisable = '\\033[02m'\nunderline = '\\033[04m'\nreverse = '\\033[07m'\nstrikethrough = '\\033[09m'\ninvisible = '\\033[08m'\n\nblack = '\\033[30m'\nred = '\\033[31m'\ngreen = '\\033[32m'\norange = '\\033[33m'\nblue = '\\033[34m'\npurple = '\\033[35m'\ncyan = '\\033[36m'\nlightgrey = '\\033[37m'\ndarkgrey = '\\033[90m'\nlightred = '\\033[91m'\nlightgreen = '\\033[92m'\nyellow = '\\033[93m'\nlightblue = '\\033[94m'\npink = '\\033[95m'\nlightcyan = '\\033[96m'\n\n# background\nbg_black = '\\033[40m'\nbg_red = '\\033[41m'\nbg_green = '\\033[42m'\nbg_orange = '\\033[43m'\nbg_blue = '\\033[44m'\nbg_purple = '\\033[45m'\nbg_cyan = '\\033[46m'\nbg_lightgrey = '\\033[47m'\n\n\nclass nn:\n def __init__(self, layers=[],\n file_path=None,\n debug=False\n ):\n self.layers = layers\n\n self.file_path = file_path\n if self.file_path is None:\n self.file_path = '~' + os.sep + 'dt1.json'\n self.file_path = self.file_path.replace('~', sys.path[0])\n\n self.debug = debug\n\n self.epochs = 0\n\n self.error = 1\n self.error_min = 1\n self.error_start = 1\n\n self.data_min = []\n self.data_start = []\n\n def save(self):\n with open(self.file_path, 'w', encoding='utf-8') as file:\n data = []\n for layer in self.layers:\n data.append(dict(type=layer.__class__.__name__))\n if layer.__class__ == FullConnected:\n data[-1]['size'] = layer.size\n data[-1]['learning_rate'] = 
layer.learning_rate\n data[-1]['momentum'] = layer.momentum\n data[-1]['activation'] = [i.__class__.__name__ for i in layer.activation]\n data[-1]['loss'] = layer.loss.__class__.__name__\n data[-1]['weights'] = layer.w\n data[-1]['biases'] = layer.b\n elif layer.__class__ == Convolution:\n data[-1]['channels'] = layer.channels\n data[-1]['height'] = layer.height - 2 * layer.padding\n data[-1]['width'] = layer.width - 2 * layer.padding\n data[-1]['matrix_c'] = layer.matrix_c\n data[-1]['matrix_h'] = layer.matrix_h\n data[-1]['matrix_w'] = layer.matrix_w\n data[-1]['padding'] = layer.padding\n data[-1]['stride_h'] = layer.stride_h\n data[-1]['stride_w'] = layer.stride_w\n data[-1]['activation'] = layer.activation.__class__.__name__\n data[-1]['learning_rate'] = layer.learning_rate\n data[-1]['momentum'] = layer.momentum\n data[-1]['weights'] = layer.w\n data[-1]['biases'] = layer.b\n elif layer.__class__ == MaxPooling:\n data[-1]['channels'] = layer.channels\n data[-1]['height'] = layer.height\n data[-1]['width'] = layer.width\n data[-1]['matrix_h'] = layer.matrix_h\n data[-1]['matrix_w'] = layer.matrix_w\n\n json.dump(data, file)\n\n def load(self):\n with open(self.file_path, 'r', encoding='utf-8') as file:\n self.layers = []\n\n for layer in json.load(file):\n if layer['type'] == 'FullConnected':\n self.layers.append(FullConnected(\n size=layer['size'],\n activation=[eval(i + '()') for i in layer['activation']],\n loss=eval(layer['loss'] + '()'),\n learning_rate=layer['learning_rate'],\n momentum=layer['momentum']\n ))\n self.layers[-1].w = layer['weights']\n self.layers[-1].b = layer['biases']\n elif layer['type'] == 'Convolution':\n self.layers.append(Convolution(\n channels=layer['channels'],\n height=layer['height'],\n width=layer['width'],\n matrix_c=layer['matrix_c'],\n matrix_h=layer['matrix_h'],\n matrix_w=layer['matrix_w'],\n padding=layer['padding'],\n stride_h=layer['stride_h'],\n stride_w=layer['stride_w'],\n activation=eval(layer['activation'] + '()'),\n learning_rate=layer['learning_rate'],\n momentum=layer['momentum']\n ))\n self.layers[-1].w = layer['weights']\n self.layers[-1].b = layer['biases']\n elif layer['type'] == 'MaxPooling':\n self.layers.append(MaxPooling(\n channels=layer['channels'],\n height=layer['height'],\n width=layer['width'],\n matrix_h=layer['matrix_h'],\n matrix_w=layer['matrix_w']\n ))\n\n def result(self):\n return [*self.layers[-1].x[-1]]\n\n def define_err(self):\n for layer in self.layers:\n layer.define_err()\n\n def forward(self, data_input):\n for layer in self.layers:\n if layer.__class__ == FullConnected and len(self.layers) > 1:\n x = []\n for chanel in data_input:\n for row in chanel:\n x += row\n data_input = x\n\n layer.forward(data_input)\n data_input = layer.x\n\n def backward(self, data_output):\n for layer in self.layers[::-1]:\n layer.backward(data_output)\n data_output = layer.err\n\n if layer.__class__ == FullConnected and len(self.layers) > 1:\n data_output = data_output[0]\n channels = len(self.layers[-2].x)\n height = len(self.layers[-2].x[0])\n width = len(self.layers[-2].x[0][0])\n err = []\n for i in range(channels):\n err.append([])\n for j in range(height):\n err[i].append([])\n for l in range(width):\n err[i][j].append(data_output[i * height * width + j * width + l])\n data_output = err\n\n def calculate_error(self, data):\n self.error = 0\n\n for i in range(len(data)):\n self.forward(data[i][0])\n for j in range(len(data[i][1])):\n self.error += abs(data[i][1][j] - self.layers[-1].x[-1][j])\n self.error /= 
len(data[i][1])\n self.error /= len(data)\n\n def train(self, data, epochs=10000):\n for epoch in range(epochs):\n # start_epoch = time.time()\n i = random.randint(0, len(data) - 1)\n self.forward(data[i][0])\n self.backward(data[i][1])\n # if self.debug:\n # print(f'time {round(time.time() - start_epoch, 2)}')\n self.epochs += epochs\n self.calculate_error(data)\n\n def train_alpha(self, data, alpha=0.001):\n for i in range(len(data)):\n self.forward(data[i][0])\n self.data_start.append(self.result())\n print(cyan, i, self.result(), '->', data[i][1], endc)\n self.calculate_error(data)\n self.error_start = self.error\n print(cyan, self.error_start, endc)\n\n start = time.time()\n while self.error > alpha:\n\n if self.error < self.error_min:\n self.error_min = self.error\n self.data_min = copy.deepcopy(self.data_start)\n self.file_path = self.file_path[:self.file_path.rfind(os.sep)] + os.sep + 'tmp_' + self.file_path[self.file_path.rfind(os.sep) + 1:]\n self.save()\n self.file_path = self.file_path.replace('tmp_', '')\n\n self.train(data, 75)\n self.save()\n\n if self.debug:\n for j in range(len(data)):\n self.forward(data[j][0])\n print(green, j, self.data_start[j], '->', self.data_min[j], '->', self.layers[-1].x[-1], '->', data[j][1], endc)\n print(self.error_start, '->', self.error_min, '->', self.error)\n print(self.epochs)\n\n print(yellow + f'time {round(time.time() - start, 2)}' + endc)\n\n def info(self):\n print(f'error = {self.error},')\n print(f'epochs = {self.epochs},')\n for layer in self.layers:\n print(layer.__class__.__name__ + ':')\n if layer.__class__ == FullConnected:\n print(f' size = {layer.size},')\n print(f' activation = {\", \".join([i.__class__.__name__ for i in layer.activation])}')\n print(f' loss = {layer.loss.__class__.__name__}')\n print(f' learning_rate = {layer.learning_rate}')\n print(f' momentum = {layer.momentum}')\n elif layer.__class__ == Convolution:\n print(f' data_input_size = {layer.channels} x {layer.height - 2 * layer.padding} x {layer.width - 2 * layer.padding},')\n print(f' size = {layer.matrix_c} x {1 + (layer.height - layer.matrix_h) // layer.stride_h} x {1 + (layer.width - layer.matrix_w) // layer.stride_w}')\n print(f' matrix_size = {layer.matrix_c} x {layer.matrix_h} x {layer.matrix_w}')\n print(f' stride_size = {layer.stride_h} x {layer.stride_w}')\n print(f' padding = {layer.padding}')\n print(f' activation = {layer.activation.__class__.__name__}')\n print(f' learning_rate = {layer.learning_rate}')\n print(f' momentum = {layer.momentum}')\n elif layer.__class__ == MaxPooling:\n print(f' data_input_size = {layer.channels} x {layer.height} x {layer.width}')\n print(f' size = {layer.channels} x {layer.height // layer.matrix_h} x {layer.width // layer.matrix_w}')\n print(f' matrix_size = {layer.matrix_h} x {layer.matrix_w}')\n","repo_name":"takayama3/project_nn","sub_path":"core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6626873402","text":"import argparse\nimport importlib\nfrom types import ModuleType\nfrom typing import List, Optional, Tuple\n\nimport cf_genie.logger as logger\nfrom cf_genie.utils import Timer\n\n# isort: off\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # disable tensorflow logging\n# isort: on\n\n\ndef get_task_module(task):\n return f'cf_genie.tasks.{task}'\n\n\ndef import_module(task: str) -> ModuleType:\n try:\n return 
importlib.import_module(get_task_module(task))\n except ModuleNotFoundError as e:\n e.msg = f'Task {task} is not present in the cf_genie.tasks module'\n raise e\n\n\ndef parse_prog_arg(prog_arg: str) -> Tuple[str, Optional[str]]:\n prog_arg_split = prog_arg.split('=')\n if len(prog_arg_split) > 2:\n raise ValueError(f\"Found more than one '=' in program argument {prog_arg}\")\n\n if len(prog_arg_split) == 1:\n return prog_arg_split[0], None\n else:\n return prog_arg_split[0], prog_arg_split[1]\n\n\ndef flatten_args(prog_args: List[Tuple[str, Optional[str]]]):\n result = []\n for arg, value in prog_args:\n result.append(f'--{arg}')\n if value:\n result.append(str(value))\n return result\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Run a task inside cf_genie.tasks')\n parser.add_argument('task', type=import_module, help='Python module inside cf_genie.tasks')\n parser.add_argument(\n '--prog-arg',\n type=parse_prog_arg,\n help='Task runner arguments in the form of ARG=VALUE',\n action='append',\n default=[])\n\n args = parser.parse_args()\n\n print(args)\n module: ModuleType = args.task\n\n log = logger.get_logger('task_runner')\n with Timer(f'Executing `main` function of module {module.__name__}', log=log):\n module.main(*flatten_args(args.prog_arg))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"magic-cp/cf-genie","sub_path":"task_runner.py","file_name":"task_runner.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"1822433870","text":"A = [7,4,6,7,9,13,1,10,4]\n# RESULT: 10 (the second largest value)\n\nA.sort(reverse=True)\nprint(A)\n\n# O(n)\ndef znajdz_2_max(A):\n max = A[0]\n max_2 = A[0]\n\n for liczba in A:\n if liczba > max:\n if max > max_2:\n max_2 = max\n max = liczba\n elif liczba > max_2:\n max_2 = liczba\n return max_2\n\nprint(znajdz_2_max(A))","repo_name":"bartekskrzyczek/matura","sub_path":"2najwiekszaliczba.py","file_name":"2najwiekszaliczba.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4004013021","text":"import swapper\nfrom django.db import models\n\nfrom faculty_biodata.models.abstract_classes.base_model import BaseModel\n\n\nclass AbstractAssociateScholar(BaseModel):\n \"\"\"\n This model contains a research scholar associated with the faculty member\n \"\"\"\n\n scholar_name = models.CharField(\n max_length=63,\n )\n institution = models.CharField(\n max_length=127,\n )\n home_page = models.URLField(\n blank=True,\n max_length=255,\n )\n interest = models.CharField(\n max_length=255,\n )\n\n class Meta:\n \"\"\"\n Meta class for AbstractAssociateScholar\n \"\"\"\n\n abstract = True\n\n def __str__(self):\n \"\"\"\n Return the string representation of the model\n :return: the string representation of the model\n \"\"\"\n\n faculty_member = self.faculty_member\n scholar_name = self.scholar_name\n return f'{faculty_member} - {scholar_name}'\n\n\nclass AssociateScholar(AbstractAssociateScholar):\n \"\"\"\n This class implements AbstractAssociateScholar\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for AssociateScholar\n \"\"\"\n\n swappable = swapper.swappable_setting('faculty_biodata',\n 
'AssociateScholar')\n","repo_name":"IMGIITRoorkee/omniport-service-faculty-biodata","sub_path":"models/engagements/associate_scholar.py","file_name":"associate_scholar.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12662741129","text":"from django.db import models\nfrom db.base_model import BaseModel\n# Create your models here.\n\n\nclass OrderInfo(BaseModel):\n '''Order model class'''\n PAY_METHOD = {\n '1':'货到付款',\n '2':'微信支付',\n '3': '支付宝',\n '4': '银联支付'\n }\n\n\n PAY_METHOD_CHOICES = (\n (1, '货到付款'),\n (2, '微信支付'),\n (3, '支付宝'),\n (4, '银联支付')\n )\n\n ORDER_STATUS = {\n 1: '待支付',\n 2: '待发货',\n 3: '待收货',\n 4: '待评价',\n 5: '已完成'\n }\n\n\n ORDER_STATUS_CHOICES = (\n (1, '待支付'),\n (2, '待发货'),\n (3, '待收货'),\n (4, '待评价'),\n (5, '已完成')\n )\n\n order_id = models.CharField(max_length=128, primary_key=True, verbose_name='订单id')\n user = models.ForeignKey('user.User', verbose_name='用户')\n way = models.CharField(max_length=20, verbose_name='区服和上号方式')\n pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=3, verbose_name='支付方式')\n total_hour = models.IntegerField(default=1, verbose_name='租时')\n rent = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='租金')\n deposit = models.DecimalField(max_digits=10, decimal_places=2,verbose_name='押金')\n order_status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name='订单状态')\n coupon = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name='优惠券')\n payment = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name='实际支付')\n trade_no = models.CharField(max_length=128, verbose_name='支付编号')\n\n\n class Meta:\n db_table = 'df_order_info'\n verbose_name = '订单'\n verbose_name_plural = verbose_name\n\n\nclass OrderGoods(BaseModel):\n '''Order goods model class'''\n order = models.ForeignKey('OrderInfo', verbose_name='订单')\n sku = models.ForeignKey('goods.GoodsSKU', verbose_name='商品SKU')\n rent_time = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='租金')\n\n\n class Meta:\n db_table = 'df_order_goods'\n verbose_name = '订单商品'\n verbose_name_plural = verbose_name\n","repo_name":"niexin-1/gamelife","sub_path":"apps/order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"34596777950","text":"import pandas as pd\nimport numpy as np\nimport cv2 # for image processing\nimport os.path\nimport tensorflow as tf\nfrom tensorflow.python.keras.models import load_model\nfrom os import listdir\nfrom ..utils.logger import write_cloud_logger\nfrom ..utils.image_utils import get_image\nfrom ..utils_sequence.sequence_generation_artist_rnn import Sequence_generator_artist_rnn\nfrom ..utils.similarity_measure import Cosine_similarity\n\n\nMODEL_DIR = os.path.join(os.getcwd(), 'static/model')\n\n#Denoisy Auto-encoder\nMODEL_PATH = os.path.join(MODEL_DIR, 'denoisy_encoder.h5')\nMATRIX_FILE_NAME = os.path.join( MODEL_DIR, 'train_mayors_style_encode.npy' )\n\nALL_METADATA_FILE_NAME = os.path.join( MODEL_DIR, 'train_mayors_style_encoded_with_url.csv' )\n\n# Embedding matrix\nEMB_MATRIX_FILE_NAME = os.path.join( MODEL_DIR, 'train_mayors_style_embedding.npy' )\n\n# Artist code matrix\nARITST_CODE_MATRIX_FILE_NAME = os.path.join( MODEL_DIR, 'train_mayors_style_artist_code_matrix.npy' )\n\nmodel = {\n 'model_encoder' : MODEL_PATH,\n 'metadata' : 
ALL_METADATA_FILE_NAME,\n 'matrix' : MATRIX_FILE_NAME,\n 'emb_matrix' : EMB_MATRIX_FILE_NAME,\n 'artist_code_matrix' : ARITST_CODE_MATRIX_FILE_NAME\n}\n\n\nclass Artwork_sequence_artist_rnn_service:\n\n def __init__(self):\n self.name = \"Artwork_sequence_artist_rnn_service\"\n #load Auto-encoder model\n self._autoencoder_model = load_model(model['model_encoder'])\n\n #Create a similarity measure object\n self.sim_measure = Cosine_similarity()\n\n #Load all metadata\n self._all_metadata = pd.read_csv(model['metadata'])\n \n #load matrix model\n self.artwork_code_matrix = np.load( model['matrix'] )\n #load embedding matrix model\n self.embedding_code_matrix = np.load( model['emb_matrix'] )\n #Combine code with embeddings\n self.code_emb_matrix = np.hstack((self.artwork_code_matrix, self.embedding_code_matrix))\n\n #Reduce artist code matrix\n self.artist_code_matrix = np.load( model['artist_code_matrix'] )\n self.artist_code_emb_matrix = np.hstack((self.code_emb_matrix, self.artist_code_matrix.reshape((-1, 1))))\n\n #load sequence RNN mdoel\n self._sequence_rnn_model = Sequence_generator_artist_rnn(self._all_metadata, self.artist_code_emb_matrix)\n\n\n\n def predict_tour(self, window_images):\n\n #Define window for sequence RNN input\n img_codes =[]\n for img_str in window_images:\n\n image_norm = get_image(img_str)\n #Get the code for the input image\n code = self._autoencoder_model.predict(image_norm).reshape((-1,))\n\n #Find artwork with the most similar code\n sim_matrix = self.sim_measure.get_similarity_measure_matrix(code, self.artwork_code_matrix)\n artwork_index = np.argsort(sim_matrix)[0,-1]\n\n #Append most similar embedding and artist code\n artist_code_emb = self.artist_code_emb_matrix[artwork_index,:] \n\n img_codes.append(artist_code_emb) \n x_tour_matrix = np.stack(img_codes)\n\n print(x_tour_matrix.shape)\n\n \n #Predict tour\n self._sequence_rnn_model.set_tour(x_tour_matrix)\n df_tour_predicted = self._sequence_rnn_model.predict_tour()\n\n #Drop artworks without valid url\n df_tour_predicted = df_tour_predicted.dropna(subset=['imageUrl'])\n\n top_ten = df_tour_predicted[['title', 'artist', 'imageUrl']].transpose().to_dict()\n values = list(top_ten.values())\n \n result = []\n for i in range(len(top_ten)):\n values[i]['id'] = list(top_ten.keys())[i]\n result.append(values[i])\n\n return result\n\n #os.remove(image_path)\n \n","repo_name":"ignaciogatti/artwork-retrieval-api","sub_path":"app/main/service/sequence_artist_rnn_service.py","file_name":"sequence_artist_rnn_service.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17500684698","text":"\"\"\"\n Class Name : Filter\n Class Version: 0.1\n\n Description:\n Filters messages (not for commands) and applies actions accordingly.\n Based on the framework used by the Command.py core module.\n\n Contributors:\n - Euklyd / Popguin\n\n License:\n Arcbot is free software: you can redistribute it and/or modify it\n under the terms of the GNU General Public License v3; as published\n by the Free Software Foundation\n\"\"\"\n\nimport logging\nimport re\n\n\nclass Filter():\n def __init__(self, pattern, callback, ignore=None, name=None, plugin=None,\n server=None, doc_brief=None, doc_detail=None, flags=0):\n self.pattern = pattern\n self.callback = callback\n self.ignore = ignore\n self.name = name\n self.plugin = plugin\n self.server = server\n self.doc_brief = doc_brief\n self.flags = flags\n if (doc_detail is None):\n 
self.doc_detail = doc_brief\n else:\n self.doc_detail = doc_detail\n\n self.logger = logging.getLogger(__name__)\n\n def __str__(self):\n return self.callback.__name__\n\n async def invoke(self, message, arguments):\n await self.callback(message, arguments)\n\n\nclass FilterManager():\n def __init__(self, core):\n self.filters = {}\n self.core = core\n self.logger = logging.getLogger(__name__)\n\n async def check(self, message):\n \"\"\"\n Summary:\n Checks an incoming message against all filters\n Invokes any filter that catches the message\n\n Args:\n message (Message): A message instance from discord.Message\n\n Returns:\n None\n \"\"\"\n if message.author.bot:\n return\n if message.server is None:\n return\n filters = list(self.filters.items())\n # self.logger.info(filters)\n for key, msg_filter in filters:\n # self.logger.info(\"key: {}\".format(key))\n # self.logger.info(\"filter: {}\".format(msg_filter))\n # self.logger.info(\"flags: {}\".format(msg_filter.flags))\n match = re.search(msg_filter.pattern, message.content, msg_filter.flags)\n if (msg_filter.server is not None and\n match and (\n msg_filter.server is True or\n message.server.id in msg_filter.server)\n ):\n # self.logger.debug(\"'{}' detected\")\n self.logger.info(\"{}: '{}' detected\".format(msg_filter.name, message.content))\n if (msg_filter.ignore is None or self.core.ACL.getAccess(message.author.id) < msg_filter.ignore):\n arguments = match.groups()\n # self.logger.info(arguments)\n self.logger.info(\"'{}' invoked\".format(message.content))\n await msg_filter.invoke(message, arguments)\n\n def register(self, pattern, callback, ignore=None, filter_name=None,\n server=None, doc_brief=None, doc_detail=None, flags=0):\n \"\"\"\n Summary:\n Pushes Filter instance to filter list\n\n Args:\n pattern (str): Regex that the message parser will match with\n callback (func): Function object that will be invoked when message parser finds a match\n ignore (int): Ignore messages from authors with access greater than this amount\n\n Returns:\n None\n \"\"\"\n clazz = type(callback.__self__).__name__\n name = clazz + \".\" + callback.__name__\n\n if (name in self.filters):\n self.logger.warning(\"Duplicate filter \\\"\" + clazz + \".\" + name + \"\\\". 
Skipping registration.\")\n return\n else:\n self.logger.debug(\"Registered filter \\\"\" + clazz + \".\" + name + \"\\\"\")\n\n if (server == \"TEST\"):\n server = self.core.test_server\n\n self.filters[name] = Filter(\n pattern,\n callback,\n ignore=ignore,\n name=filter_name,\n plugin=clazz,\n server=server,\n doc_brief=doc_brief,\n doc_detail=doc_detail,\n flags=flags\n )\n # self.logger.info(self.filters[name])\n\n def unregister(self, filter_name):\n \"\"\"\n Summary:\n Unregisters a filter\n Removes Filter instance from filter list\n Filter will no longer run when message parser finds a match\n\n Args:\n filter_name (str): Name of the filter to unregister\n\n Returns:\n None\n \"\"\"\n if (filter_name in self.filters):\n msg_filter = self.filters[filter_name]\n\n clazz = type(msg_filter.callback.__self__).__name__\n name = clazz + \".\" + msg_filter.callback.__name__\n\n del self.filters[filter_name]\n self.logger.debug(\"Unregistered filter \\\"\" + name + \"\\\"\")\n\n else:\n self.logger.warning(\"Cannot unregister \\\"\" + filter_name + \"\\\", filter not found.\")\n","repo_name":"euklyd/PenguinBot3K","sub_path":"core/Filter.py","file_name":"Filter.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14906989298","text":"from tkinter import *\nimport tkinter.filedialog\nimport tkinter as tk\nimport socket as s\nfrom tkinter.tix import IMAGE\nimport sys\nimport os\nimport tkinter as tk\nfrom pathlib import Path\nfrom glob import glob\nfrom logging import exception\nfrom textwrap import wrap\nfrom tkinter import *\nimport requests\nimport urllib.request\nimport json\nfrom tkinter.messagebox import showinfo\nfrom bs4 import BeautifulSoup\nimport threading\n\n#--------------------------------------------------------\nsys.path.insert(0, str(Path(__file__).resolve().parent.parent))\n\n# Path to asset files for this GUI master.\nASSETS_PATH = Path(__file__).resolve().parent / \"assets\"\n\n# Required in order to add data files to masters executable\npath = getattr(sys, '_MEIPASS', os.getcwd())\nos.chdir(path)\n\noutput_path = \"\"\ndef IP2LOCATION(domain):\n GEO_IP_API_URL = 'http://ip-api.com/json/'\n DOMAIN = domain \n req = urllib.request.Request(GEO_IP_API_URL + DOMAIN)\n response = urllib.request.urlopen(req).read()\n json_response = json.loads(response.decode('utf-8'))\n return (json_response)\n\ndef FIND_IP(domain):\n \"\"\"\n Get host name and return Ip address of website\n \"\"\"\n info = s.getaddrinfo(domain,80) \n LIST_IP =[]\n for i in info:\n LIST_IP.append(i[4][0])\n return (LIST_IP)\n \nPORTS =[]\ndef PORT_SCANER(port):\n so = s.socket(s.AF_INET, s.SOCK_STREAM)\n s.setdefaulttimeout(0.1)\n\n res = so.connect_ex((domain,port))\n if res ==0:\n try:\n server_name = s.getservbyport(port,'tcp')\n except:\n server_name = ''\n \n result = {\n 'port':port,\n 'state':'Open',\n 'server name':server_name\n } \n else:\n try:\n server_name = s.getservbyport(port,'tcp')\n except:\n server_name = ''\n result = {\n 'port':port,\n 'state':'Close',\n 'server name':server_name\n } \n so.close()\n PORTS.append(result)\n return result\n\ndef FIND_DIRECTORY(domain):\n url = f\"http://www.{domain}\"\n soup = BeautifulSoup(requests.get(url).text,\"html.parser\")\n\n hrefs = []\n for a in soup.find_all('a'):\n hrefs.append(a['href'])\n\n return hrefs\n\ndef CHECK_PORT():\n port = int(port_value.get())\n res = PORT_SCANER(port)\n showinfo(\n title='Port Information',\n 
message=F\"Port{res['port']} is {res['state']}\")\n\ndef select_path():\n global output_path\n\n output_path = tkinter.filedialog.askdirectory()\n path_entry.delete(0, tk.END)\n path_entry.insert(0, output_path)\n\ndef select_path_SB():\n global output_path_SB\n\n output_path_SB = tkinter.filedialog.askdirectory()\n path_entry_SB.delete(0, tk.END)\n path_entry_SB.insert(0, output_path_SB)\n\n\ndef Download_directorys():\n path_to_save = path_entry.get()\n completeName = os.path.join(path_to_save, \"Directorys.txt\")\n Links = FIND_DIRECTORY(domain)\n with open(completeName, 'w') as f:\n for item in Links:\n f.write(\"%s\\n\" % item)\n tk.messagebox.showinfo(\n \"Success!\", f\"All Directorys successfully downloaded at {path_to_save}.\")\n\nSUB_DOMAINS = []\ndef FIND_SUBDOMAINS(subdomain):\n\n s = requests.session()\n url = f'https://{subdomain}.{domain}'\n try:\n s.get(url ,timeout=1.5)\n #print(url)\n SUB_DOMAINS.append(url)\n except:\n pass\n\n\ndef Download_SubDomain():\n path_to_save = path_entry_SB.get()\n completeName= os.path.join(path_to_save, \"SubDomains.txt\") \n threads = []\n file = open('Subdomain50.txt','r')\n Subs = file.read()\n sub_domain = Subs.splitlines()\n\n for i in sub_domain:\n thread = threading.Thread(target=FIND_SUBDOMAINS ,args=[i])\n thread.start()\n threads.append(thread)\n\n for thread in threads:\n thread.join()\n\n with open(completeName, 'w') as f:\n for item in SUB_DOMAINS:\n f.write(\"%s\\n\" % item)\n tk.messagebox.showinfo(\n \"Success!\", f\"Subdomains successfully downloaded at {path_to_save}.\")\n\ndef btn_clicked():\n global domain\n domain = domain_entry.get()\n master.destroy()\n\n window = Tk()\n window.title(\"Information Gathering Tool\") \n window.geometry(\"862x519\")\n window.configure(bg = \"#e5ecf9\")\n #------HEADER GUI---------\n canvas = Canvas(\n window,\n bg = \"#e5ecf9\",\n height = 519,\n width = 862,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\")\n canvas.place(x = 0, y = 0)\n\n\n canvas.create_rectangle(\n 35, 26, 35+200, 26+45,\n fill = \"#6592fe\",\n outline = \"\")\n\n\n canvas.create_rectangle(\n 235, 26, 235+200, 26+45,\n fill = \"#85a9ff\",\n outline = \"\")\n\n\n canvas.create_rectangle(\n 431, 26, 431+200, 26+45,\n fill = \"#6592fe\",\n outline = \"\")\n\n\n canvas.create_rectangle(\n 631, 26, 631+200, 26+45,\n fill = \"#85a9ff\",\n outline = \"\")\n\n canvas.create_text(\n 135.5, 50,\n text = \"Domain Name\",\n fill = \"#FFFFFF\",\n font = (\"Rasa-Regular\", int(18.0)))\n\n canvas.create_text(\n 326.5, 50,\n text = \"Country\",\n fill = \"#FFFFFF\",\n font = (\"Rasa-Regular\", int(18.0)))\n\n canvas.create_text(\n 720, 50,\n text = \"ISP\",\n fill = \"#FFFFFF\",\n font = (\"Rasa-Regular\", int(18.0)))\n\n canvas.create_text(\n 520.5, 50,\n text = \"City\",\n fill = \"#FFFFFF\",\n font = (\"Rasa-Regular\", int(18.0)))\n\n\n canvas.create_rectangle(\n 35, 71, 35+200, 71+45,\n fill = \"#e5e5e5\",\n outline = \"\")\n\n\n canvas.create_rectangle(\n 235, 71, 235+200, 71+45,\n fill = \"#dfdfdf\",\n outline = \"\")\n\n\n canvas.create_rectangle(\n 431, 71, 431+200, 71+45,\n fill = \"#e5e5e5\",\n outline = \"\")\n\n\n canvas.create_rectangle(\n 631, 71, 631+200, 71+45,\n fill = \"#dfdfdf\",\n outline = \"\")\n#------------Call IP2LOCATION-----------\n\n location = IP2LOCATION(domain)\n\n #DOMAIN_NAME TEXT\n canvas.create_text(\n 120, 90.0,\n text = domain,\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(12.0)))\n #COUNTRY TEXT\n canvas.create_text(\n 328.5, 90.0,\n text = location['country'],\n fill = 
\"#000000\",\n font = (\"Comic Sans MS\", int(12.0)))\n #CITY TEXT\n canvas.create_text(\n 508.0, 90,\n text = location['city'],\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(12.0)))\n #ISP TEXT\n canvas.create_text(\n 721.5, 90,\n text = location['isp'],\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(12.0)))\n #----------IP ADDRESS GUI-----------------\n\n ip_bg = PhotoImage(file=ASSETS_PATH / \"ip_bg.png\")\n LIST_IP_ADDRESESS = FIND_IP(domain)\n\n for i in range(0,len(LIST_IP_ADDRESESS)):\n x = 135 + (i*200)\n y = 150 \n ip1 = canvas.create_image(x, y, image=ip_bg)\n\n canvas.create_text(\n x , y,\n text = LIST_IP_ADDRESESS[i],\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(12.0)))\n if (i==3):\n break\n\n #------OPEN PORT GUI----------\n canvas.create_rectangle(\n 35, 196, 35+80, 196+30,\n fill = \"#6592fe\",\n outline = \"\")\n\n\n canvas.create_rectangle(\n 115, 196, 115+70, 196+30,\n fill = \"#85a9ff\",\n outline = \"\")\n\n\n canvas.create_rectangle(\n 185, 196, 185+85, 196+30,\n fill = \"#b1c7fe\",\n outline = \"\")\n\n canvas.create_text(\n 72.5, 210.0,\n text = \"Port\",\n fill = \"#FFFFFF\",\n font = (\"Comic Sans MS\", int(18.0)))\n\n canvas.create_text(\n 148.0, 211.0,\n text = \"State\",\n fill = \"#FFFFFF\",\n font = (\"Comic Sans MS\", int(18.0)))\n\n canvas.create_text(\n 225.0, 210.0,\n text = \"Service Name\",\n fill = \"#FFFFFF\",\n font = (\"Comic Sans MS\", int(10.0)))\n\n\n canvas.create_rectangle(\n 35, 226, 35+235, 226+255,\n fill = \"#e5e5e5\",\n outline = \"\")\n\n loading_png = tk.PhotoImage(file=ASSETS_PATH / \"loading.png\")\n loading_img = canvas.create_image(149.5, 350, image=loading_png)\n #call port scanner:\n\n def find(): \n canvas.delete(loading_img) \n ports = [21,22,23,53,80,443,194]\n\n threads = []\n for i in ports:\n thread = threading.Thread(target=PORT_SCANER ,args=[i])\n\n thread.start()\n threads.append(thread)\n\n\n for thread in threads:\n thread.join()\n for i in range(0,len(PORTS)):\n\n y = 250 +(i*20)\n\n canvas.create_text(\n 73, y,\n text =PORTS[i]['port'],\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(10.0)))\n canvas.create_text(\n 149, y,\n text =PORTS[i]['state'],\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(10.0)))\n\n canvas.create_text(\n 226, y,\n text =PORTS[i]['server name'],\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(10.0))) \n if y >= 400:\n break\n global port_value \n port_value = tk.Spinbox(window,from_=1 , to=65535,width=10,textvariable=IntVar())\n port_value.place(x=100 , y=420)\n\n canvas.create_text(\n 150, 450,\n text ='must be between 1 and 65535',\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(8.0)))\n\n search_btn_img = tk.PhotoImage(file=ASSETS_PATH / \"seach_btn.png\")\n search_btn = tk.Button(\n image=search_btn_img, borderwidth=0, highlightthickness=0,\n command=CHECK_PORT, relief=\"flat\")\n search_btn.place(x=180, y=415, width=25, height=25)\n #---------SUB DOMAIN--------\n\n canvas.create_rectangle(\n 591, 226, 591+240, 226+255,\n fill = \"#e5e5e5\",\n outline = \"\")\n\n canvas.create_rectangle(\n 315, 196, 315+240, 196+30,\n fill = \"#85a9ff\",\n outline = \"\")\n\n canvas.create_text(\n 435.0, 210.0,\n text = \"Sub Domains\",\n fill = \"#FFFFFF\",\n font = (\"Comic Sans MS\", int(14.0)))\n\n canvas.create_rectangle(\n 315, 222, 315+240, 222+255,\n fill = \"#b1c7fe\",\n outline = \"\")\n loading_png1 = tk.PhotoImage(file=ASSETS_PATH / \"loading.png\")\n loading_img1 = canvas.create_image(430, 350, image=loading_png1)\n #SubDomain:\n def SUB 
():\n canvas.delete(loading_img1)\n threads = []\n sub_domain = ['www','mail','store','news','admin','m','email','1']\n for i in sub_domain:\n thread = threading.Thread(target=FIND_SUBDOMAINS ,args=[i])\n thread.start()\n threads.append(thread)\n\n\n for thread in threads:\n thread.join()\n\n for i in range(0,len(SUB_DOMAINS)):\n\n y = 250 +(i*20)\n\n canvas.create_text(\n 430, y,\n text =SUB_DOMAINS[i],\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(10.0)))\n \n if y >= 400:\n break\n #---------Download All Subdomains----:\n global path_entry_SB\n path_entry_SB = tk.Entry(bd=0, bg=\"#F6F7F9\", highlightthickness=0)\n path_entry_SB.place(x=325.0, y=435, width=150.0, height=20)\n path_picker_img1 = tk.PhotoImage(file = ASSETS_PATH / \"path_picker.png\")\n path_picker_button = tk.Button(\n image = path_picker_img1,\n text = '',\n compound = 'center',\n fg = 'white',\n borderwidth = 0,\n highlightthickness = 0,\n command = select_path_SB,\n relief = 'flat')\n path_picker_button.place(\n x = 458, y = 435,\n width = 24,\n height = 22)\n download_btn_img1 = tk.PhotoImage(file=ASSETS_PATH / \"download_btn.png\")\n download_btn = tk.Button(\n image=download_btn_img1, borderwidth=0, highlightthickness=0,\n command=Download_SubDomain, relief=\"flat\")\n download_btn.place(x=490, y=432, width=25, height=25)\n\n\n #-----DIRECTORYS GUI------:\n canvas.create_rectangle(\n 591, 196, 591+240, 196+30,\n fill = \"#85a9ff\",\n outline = \"\")\n\n\n\n #Directory:\n canvas.create_text(\n 695.0, 210.0,\n text = \"Directorys\",\n fill = \"#FFFFFF\",\n font = (\"Comic Sans MS\", int(14.0)))\n\n Links = FIND_DIRECTORY(domain)\n\n for i in range(0,len(Links)):\n\n y = 250 +(i*20)\n\n if len(Links[i])>40:\n text = Links[i][0:35] + '...'\n else:\n text = Links[i]\n canvas.create_text(\n 710, y,\n justify='right',\n text ='[+]'+text,\n fill = \"#000000\",\n font = (\"Comic Sans MS\", int(8.0)))\n if y >= 400:\n break\n #------------Download Directory Button-----------:\n global path_entry\n path_entry = tk.Entry(bd=0, bg=\"#F6F7F9\", highlightthickness=0)\n path_entry.place(x=600.0, y=435, width=150.0, height=20)\n path_picker_img = tk.PhotoImage(file = ASSETS_PATH / \"path_picker.png\")\n path_picker_button = tk.Button(\n image = path_picker_img,\n text = '',\n compound = 'center',\n fg = 'white',\n borderwidth = 0,\n highlightthickness = 0,\n command = select_path,\n relief = 'flat')\n path_picker_button.place(\n x = 745, y = 435,\n width = 24,\n height = 22)\n download_btn_img = tk.PhotoImage(file=ASSETS_PATH / \"download_btn.png\")\n generate_btn = tk.Button(\n image=download_btn_img, borderwidth=0, highlightthickness=0,\n command=Download_directorys, relief=\"flat\")\n generate_btn.place(x=780, y=435, width=25, height=25)\n\n\n window.after(3000,find)\n window.after(500,SUB)\n window.resizable(False, False)\n window.mainloop()\n\n\ndef main():\n global master\n master = tk.Tk()\n master.geometry(\"862x519\")\n logo = tk.PhotoImage(file=ASSETS_PATH / \"gathering.jpg\")\n master.call('wm', 'iconphoto', master._w, logo)\n master.title(\"Information Gathering Tool\")\n master.configure(bg=\"#3A7FF6\")\n canvas = tk.Canvas(\n master, bg=\"#3A7FF6\", height=519, width=862,\n bd=0, highlightthickness=0, relief=\"ridge\")\n canvas.place(x=0, y=0)\n canvas.create_rectangle(431, 0, 431 + 431, 0 + 519, fill=\"#FCFCFC\", outline=\"\")\n canvas.create_rectangle(40, 160, 40 + 60, 160 + 5, fill=\"#FCFCFC\", outline=\"\")\n #INFORMATION GATHERING TOOL:\n canvas.create_text(\n 370, 60,\n text = \"INFOR\",\n fill = 
\"#ffffff\",\n font = (\"graceful\", int(30)))\n\n canvas.create_text(\n 506, 60,\n text = \"MATION\",\n fill = \"BLUE\",\n font = (\"RibeyeMarrow-Regular\", int(30.0)))\n\n\n canvas.create_text(\n 375, 100,\n text = \"GATH\",\n fill = \"#ffffff\",\n font = (\"RibeyeMarrow-Regular\", int(30.0)))\n\n canvas.create_text(\n 497, 100,\n text = \"ERING\",\n fill = \"BLUE\",\n font = (\"RibeyeMarrow-Regular\", int(30.0)))\n\n canvas.create_text(\n 402.5, 135.5,\n text = \"TO\",\n fill = \"#ffffff\",\n font = (\"RibeyeMarrow-Regular\", int(30.0)))\n\n canvas.create_text(\n 455, 135.5,\n text = \"OL\",\n fill = \"BLUE\",\n font = (\"RibeyeMarrow-Regular\", int(30.0)))\n #-----\n text_box_bg = tk.PhotoImage(file=ASSETS_PATH / \"TextBox_Bg.png\")\n token_entry_img = canvas.create_image(650.5, 200.5, image=text_box_bg)\n\n #ENTRY: Get domain name:\n canvas.create_text(\n 490.0, 180.0, text=\"Domain Name:\", fill=\"#515486\",\n font=(\"Arial-BoldMT\", int(13.0)), anchor=\"w\")\n global domain_entry\n domain_entry = tk.Entry(bd=0, bg=\"#F6F7F9\", highlightthickness=0)\n domain_entry.place(x=490.0, y=200, width=321.0, height=35)\n domain_entry.focus()\n\n\n\n title = tk.Label(\n text=\"Welcome to Information Gathering\", bg=\"#3A7FF6\",\n fg=\"white\", font=(\"Arial-BoldMT\", int(18.0)))\n title.place(x=20.0, y=160.0)\n\n\n info_text = tk.Label(\n text=\"Information Gathering Tool uses the Sockets\\n\"\n \"to analyse a Website:\\n\"\n \"▫Find Ip Address\\n\"\n \"▫Find Open Ports\\n\"\n \"▫Find Subdomains\\n\" \n \"▫Find Directory\\n\" \n\n\n ,bg=\"#3A7FF6\", fg=\"white\", justify=\"left\",\n font=(\"Georgia\", int(14.0)))\n\n info_text.place(x=27.0, y=200.0)\n\n name_text = tk.Label(\n text=\"✍(✿◠‿◠) Fatemeh Homayouni | UMZ \" \n ,bg=\"#3A7FF6\", fg=\"white\", justify=\"left\",\n font=(\"Georgia\", int(10.0)))\n\n name_text.place(x=27.0, y=450.0) \n #---Host Information: --\n location_bg = tk.PhotoImage(file=ASSETS_PATH / \"location.png\")\n location_img = canvas.create_image(500.5, 270.5, image=location_bg)\n my_hostname = s.gethostname()\n my_hostip = s.gethostbyname(my_hostname)\n\n title = tk.Label(\n text=f\"Your IP address is : {my_hostip}\",bg='white',\n fg=\"black\", font=(\"Arial-BoldMT\", int(10.0)))\n title.place(x=510.5, y=260.5)\n\n\n\n generate_btn_img = tk.PhotoImage(file=ASSETS_PATH / \"generate.png\")\n generate_btn = tk.Button(\n image=generate_btn_img, borderwidth=0, highlightthickness=0,\n command=btn_clicked, relief=\"flat\")\n generate_btn.place(x=557, y=350, width=180, height=55)\n\n master.mainloop()\n\n\nmain()","repo_name":"f4teme/information-gathering-tools","sub_path":"Information_Gathering.py","file_name":"Information_Gathering.py","file_ext":"py","file_size_in_byte":17346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18718162373","text":"from torch import nn\nfrom torch_geometric.nn import (\n global_max_pool,\n global_mean_pool,\n fps,\n radius,\n knn_interpolate,\n)\nfrom torch.nn import (\n Linear as Lin,\n ReLU,\n LeakyReLU,\n BatchNorm1d as BN,\n Dropout,\n)\nfrom torch_points3d.applications.base_architectures.base_model import BaseModel\nfrom omegaconf.listconfig import ListConfig\nfrom omegaconf.dictconfig import DictConfig\nimport logging\nimport copy\n\n# from torch_points3d.models.base_model import BaseModel\nfrom torch_points3d.core.common_modules.base_modules import Identity\n\n\nlog = logging.getLogger(__name__)\n\n\nSPECIAL_NAMES = [\"radius\", \"max_num_neighbors\", \"block_names\"]\n\n\ndef 
is_list(entity):\n return isinstance(entity, list) or isinstance(entity, ListConfig)\n\n\nclass BaseFactory:\n def __init__(self, module_name_down, module_name_up, modules_lib):\n self.module_name_down = module_name_down\n self.module_name_up = module_name_up\n self.modules_lib = modules_lib\n\n def get_module(self, flow):\n if flow.upper() == \"UP\":\n return getattr(self.modules_lib, self.module_name_up, None)\n else:\n return getattr(self.modules_lib, self.module_name_down, None)\n\n\n############################# UNWRAPPED UNET BASE ###################################\n\n\nclass UnwrappedUnetBasedModel(BaseModel):\n \"\"\"Create a Unet unwrapped generator\"\"\"\n\n def _save_sampling_and_search(self, down_conv):\n sampler = getattr(down_conv, \"sampler\", None)\n if is_list(sampler):\n self._spatial_ops_dict[\"sampler\"] += sampler\n else:\n self._spatial_ops_dict[\"sampler\"].append(sampler)\n\n neighbour_finder = getattr(down_conv, \"neighbour_finder\", None)\n if is_list(neighbour_finder):\n self._spatial_ops_dict[\"neighbour_finder\"] += neighbour_finder\n else:\n self._spatial_ops_dict[\"neighbour_finder\"].append(neighbour_finder)\n\n def _save_upsample(self, up_conv):\n upsample_op = getattr(up_conv, \"upsample_op\", None)\n if upsample_op:\n self._spatial_ops_dict[\"upsample_op\"].append(upsample_op)\n\n def __init__(self, opt, model_type, modules_lib):\n \"\"\"Construct a Unet unwrapped generator\n\n The layers will be appended within lists with the following names\n * down_modules : Contains all the down module\n * inner_modules : Contain one or more inner modules\n * up_modules: Contains all the up module\n\n Parameters:\n opt - options for the network generation\n model_type - type of the model to be generated\n num_class - output of the network\n modules_lib - all modules that can be used in the UNet\n\n For a recursive implementation. 
See UnetBaseModel.\n\n opt is expected to contains the following keys:\n * down_conv\n * up_conv\n * OPTIONAL: innermost\n\n \"\"\"\n opt = copy.deepcopy(opt)\n super(UnwrappedUnetBasedModel, self).__init__()\n # detect which options format has been used to define the model\n self._spatial_ops_dict = {\"neighbour_finder\": [], \"sampler\": [], \"upsample_op\": []}\n\n if is_list(opt.down_conv) or \"down_conv_nn\" not in opt.down_conv:\n raise NotImplementedError\n else:\n self._init_from_compact_format(opt, model_type, modules_lib)\n\n def _collect_sampling_ids(self, list_data):\n def extract_matching_key(keys, start_token):\n for key in keys:\n if key.startswith(start_token):\n return key\n return None\n\n d = {}\n if self.save_sampling_id:\n for idx, data in enumerate(list_data):\n key = extract_matching_key(data.keys, \"sampling_id\")\n if key:\n d[key] = getattr(data, key)\n return d\n\n def _get_from_kwargs(self, kwargs, name):\n module = kwargs[name]\n kwargs.pop(name)\n return module\n\n def _create_inner_modules(self, args_innermost, modules_lib):\n inners = []\n if is_list(args_innermost):\n for inner_opt in args_innermost:\n module_name = self._get_from_kwargs(inner_opt, \"module_name\")\n inner_module_cls = getattr(modules_lib, module_name)\n inners.append(inner_module_cls(**inner_opt))\n\n else:\n module_name = self._get_from_kwargs(args_innermost, \"module_name\")\n inner_module_cls = getattr(modules_lib, module_name)\n inners.append(inner_module_cls(**args_innermost))\n\n return inners\n\n def _init_from_compact_format(self, opt, model_type, modules_lib):\n \"\"\"Create a unetbasedmodel from the compact options format - where the\n same convolution is given for each layer, and arguments are given\n in lists\n \"\"\"\n\n self.down_modules = nn.ModuleList()\n self.inner_modules = nn.ModuleList()\n self.up_modules = nn.ModuleList()\n\n self.save_sampling_id = opt.down_conv.get(\"save_sampling_id\")\n\n # Factory for creating up and down modules\n factory_module_cls = self._get_factory(model_type, modules_lib)\n down_conv_cls_name = opt.down_conv.module_name\n up_conv_cls_name = opt.up_conv.module_name if opt.get(\"up_conv\") is not None else None\n self._factory_module = factory_module_cls(\n down_conv_cls_name, up_conv_cls_name, modules_lib\n ) # Create the factory object\n\n # Loal module\n contains_global = hasattr(opt, \"innermost\") and opt.innermost is not None\n if contains_global:\n inners = self._create_inner_modules(opt.innermost, modules_lib)\n for inner in inners:\n self.inner_modules.append(inner)\n else:\n self.inner_modules.append(Identity())\n\n # Down modules\n for i in range(len(opt.down_conv.down_conv_nn)):\n args = self._fetch_arguments(opt.down_conv, i, \"DOWN\")\n\n conv_cls = self._get_from_kwargs(args, \"conv_cls\")\n down_module = conv_cls(**args)\n self._save_sampling_and_search(down_module)\n self.down_modules.append(down_module)\n\n # Up modules\n if up_conv_cls_name:\n for i in range(len(opt.up_conv.up_conv_nn)):\n args = self._fetch_arguments(opt.up_conv, i, \"UP\")\n conv_cls = self._get_from_kwargs(args, \"conv_cls\")\n up_module = conv_cls(**args)\n self._save_upsample(up_module)\n self.up_modules.append(up_module)\n\n def _get_factory(self, model_name, modules_lib) -> BaseFactory:\n factory_module_cls = getattr(modules_lib, \"{}Factory\".format(model_name), None)\n if factory_module_cls is None:\n factory_module_cls = BaseFactory\n return factory_module_cls\n\n def _fetch_arguments_from_list(self, opt, index):\n \"\"\"Fetch the arguments for a 
single convolution from multiple lists\n of arguments - for models specified in the compact format.\n \"\"\"\n args = {}\n for o, v in opt.items():\n name = str(o)\n if is_list(v) and len(getattr(opt, o)) > 0:\n if name[-1] == \"s\" and name not in SPECIAL_NAMES:\n name = name[:-1]\n v_index = v[index]\n if is_list(v_index):\n v_index = list(v_index)\n args[name] = v_index\n else:\n if is_list(v):\n v = list(v)\n args[name] = v\n return args\n\n def _fetch_arguments(self, conv_opt, index, flow):\n \"\"\"Fetches arguments for building a convolution (up or down)\n\n Arguments:\n conv_opt\n index in sequential order (as they come in the config)\n flow \"UP\" or \"DOWN\"\n \"\"\"\n args = self._fetch_arguments_from_list(conv_opt, index)\n args[\"conv_cls\"] = self._factory_module.get_module(flow)\n args[\"index\"] = index\n return args\n\n def _flatten_compact_options(self, opt):\n \"\"\"Converts from a dict of lists, to a list of dicts\"\"\"\n flattenedOpts = []\n\n for index in range(int(1e6)):\n try:\n flattenedOpts.append(DictConfig(self._fetch_arguments_from_list(opt, index)))\n except IndexError:\n break\n\n return flattenedOpts\n\n def forward(self, data, precomputed_down=None, precomputed_up=None, **kwargs):\n \"\"\"This method does a forward on the Unet assuming symmetrical skip connections\n\n Parameters\n ----------\n data: torch.geometric.Data\n Data object that contains all info required by the modules\n precomputed_down: torch.geometric.Data\n Precomputed data that will be passed to the down convs\n precomputed_up: torch.geometric.Data\n Precomputed data that will be passed to the up convs\n \"\"\"\n stack_down = []\n for i in range(len(self.down_modules) - 1):\n data = self.down_modules[i](data, precomputed=precomputed_down)\n stack_down.append(data)\n data = self.down_modules[-1](data, precomputed=precomputed_down)\n\n if not isinstance(self.inner_modules[0], Identity):\n stack_down.append(data)\n data = self.inner_modules[0](data)\n\n sampling_ids = self._collect_sampling_ids(stack_down)\n\n for i in range(len(self.up_modules)):\n data = self.up_modules[i]((data, stack_down.pop()), precomputed=precomputed_up)\n\n for key, value in sampling_ids.items():\n setattr(data, key, value)\n return data\n","repo_name":"torch-points3d/tp3d-testrepo","sub_path":"torch_points3d/applications/base_architectures/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":9689,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"11606491105","text":"import os\nfrom io import IOBase\nfrom typing import TypeVar, TYPE_CHECKING, Optional, Union\n\nfrom neptune.new.internal.utils.images import (\n get_image_content,\n get_html_content,\n get_pickle_content,\n is_pil_image,\n is_matplotlib_figure,\n is_plotly_figure,\n is_altair_chart,\n is_bokeh_figure,\n is_numpy_array,\n is_pandas_dataframe,\n)\n\n\nfrom neptune.new.internal.utils import limits, verify_type, get_stream_content\nfrom neptune.new.types.atoms.atom import Atom\n\nif TYPE_CHECKING:\n from neptune.new.types.value_visitor import ValueVisitor\n\nRet = TypeVar(\"Ret\")\n\n\nclass File(Atom):\n def __init__(\n self,\n path: Optional[str] = None,\n content: Optional[bytes] = None,\n extension: Optional[str] = None,\n ):\n verify_type(\"path\", path, (str, type(None)))\n verify_type(\"content\", content, (bytes, type(None)))\n verify_type(\"extension\", extension, (str, type(None)))\n\n if path is not None and content is not None:\n raise ValueError(\"path and content are 
mutually exclusive\")\n if path is None and content is None:\n raise ValueError(\"path or content is required\")\n\n self.path = path\n self.content = content\n\n if extension is None and path is not None:\n try:\n ext = os.path.splitext(path)[1]\n self.extension = ext[1:] if ext else \"\"\n except ValueError:\n self.extension = \"\"\n else:\n self.extension = extension or \"\"\n\n def accept(self, visitor: \"ValueVisitor[Ret]\") -> Ret:\n return visitor.visit_file(self)\n\n def __str__(self):\n if self.path is not None:\n return \"File(path={})\".format(str(self.path))\n else:\n return \"File(content=...)\"\n\n @staticmethod\n def from_content(\n content: Union[str, bytes], extension: Optional[str] = None\n ) -> \"File\":\n \"\"\"Factory method for creating File value objects directly from binary and text content.\n\n In the case of text content, UTF-8 encoding will be used.\n\n Args:\n content\t(str or bytes): Text or binary content to stored in the `File` value object.\n extension (str, optional, default is None): Extension of the created file.\n File will be used for interpreting the type of content for visualization.\n If `None` it will be bin for binary content and txt for text content.\n Defaults to `None`.\n\n Returns:\n ``File``: value object created from the content\n\n You may also want to check `from_content docs page`_.\n\n .. _from_content docs page:\n https://docs.neptune.ai/api-reference/field-types#.from_content\n \"\"\"\n if isinstance(content, str):\n ext = \"txt\"\n content = content.encode(\"utf-8\")\n else:\n ext = \"bin\"\n\n if limits.file_size_exceeds_limit(len(content)):\n content = \"\"\n\n return File(content=content, extension=extension or ext)\n\n @staticmethod\n def from_stream(\n stream: IOBase, seek: Optional[int] = 0, extension: Optional[str] = None\n ) -> \"File\":\n \"\"\"Factory method for creating File value objects directly from binary and text streams.\n\n In the case of text stream, UTF-8 encoding will be used.\n\n Args:\n stream (IOBase): Stream to be converted.\n seek (int, optional): See IOBase documentation.\n Defaults to `0`.\n extension (str, optional): Extension of the file created that will be used for interpreting the type\n of content for visualization.\n If `None` it will be bin for binary stream and txt for text stream.\n Defaults to `None`.\n\n Returns:\n ``File``: value object created from the stream.\n\n You may also want to check `from_stream docs page`_ and `IOBase documentation`_.\n\n .. _from_stream docs page:\n https://docs.neptune.ai/api-reference/field-types#.from_stream\n .. 
_IOBase documentation:\n https://docs.python.org/3/library/io.html#io.IOBase\n \"\"\"\n verify_type(\"stream\", stream, IOBase)\n content, stream_default_ext = get_stream_content(stream, seek)\n return File(content=content, extension=extension or stream_default_ext)\n\n @staticmethod\n def as_image(image) -> \"File\":\n \"\"\"Static method for converting image objects or image-like objects to an image File value object.\n\n This way you can upload `Matplotlib` figures, `PIL` images, `NumPy` arrays, as static images.\n\n Args:\n image: Image-like object to be converted.\n Supported are `PyTorch` tensors, `TensorFlow/Keras` tensors, `NumPy` arrays, `PIL` images\n and `Matplotlib` figures.\n\n Returns:\n ``File``: value object with converted image\n\n Examples:\n >>> import neptune.new as neptune\n >>> from neptune.new.types import File\n >>> run = neptune.init()\n\n Convert NumPy array to File value object and upload it\n\n >>> run[\"train/prediction_example\"].upload(File.as_image(numpy_array))\n\n Convert PIL image to File value object and upload it\n\n >>> pil_file = File.as_image(pil_image)\n >>> run[\"dataset/data_sample/img1\"].upload(pil_file)\n\n You can upload PIL image without explicit conversion\n\n >>> run[\"dataset/data_sample/img2\"].upload(pil_image)\n\n You may also want to check `as_image docs page`_.\n\n .. _as_image docs page:\n https://docs.neptune.ai/api-reference/field-types#.as_image\n \"\"\"\n content_bytes = get_image_content(image)\n return File.from_content(\n content_bytes if content_bytes is not None else b\"\", extension=\"png\"\n )\n\n @staticmethod\n def as_html(chart) -> \"File\":\n \"\"\"Converts an object to an HTML File value object.\n\n This way you can upload `Altair`, `Bokeh`, `Plotly`, `Matplotlib` interactive charts\n or upload directly `Pandas` `DataFrame` objects to explore them in Neptune UI.\n\n Args:\n chart: An object to be converted.\n Supported are `Altair`, `Bokeh`, `Plotly`, `Matplotlib` interactive charts,\n and `Pandas` `DataFrame` objects.\n\n Returns:\n ``File``: value object with converted object.\n\n Examples:\n >>> import neptune.new as neptune\n >>> from neptune.new.types import File\n >>> run = neptune.init()\n\n Convert Pandas DataFrame to File value object and upload it\n\n >>> run[\"train/results\"].upload(File.as_html(df_predictions))\n\n Convert Altair interactive chart to File value object and upload it\n\n >>> altair_file = File.as_html(altair_chart)\n >>> run[\"dataset/data_sample/img1\"].upload(altair_file)\n\n You can upload Altair interactive chart without explicit conversion\n\n >>> run[\"dataset/data_sample/img2\"].upload(altair_chart)\n\n You may also want to check `as_html docs page`_.\n\n .. 
_as_html docs page:\n https://docs.neptune.ai/api-reference/field-types#.as_html\n \"\"\"\n content = get_html_content(chart)\n return File.from_content(\n content if content is not None else \"\", extension=\"html\"\n )\n\n @staticmethod\n def as_pickle(obj) -> \"File\":\n \"\"\"Pickles a Python object and stores it in `File` value object.\n\n This way you can upload any Python object for future use.\n\n Args:\n obj: An object to be pickled.\n Any picklable Python object is supported.\n\n Returns:\n ``File``: value object with pickled object.\n\n Examples:\n >>> import neptune.new as neptune\n >>> from neptune.new.types import File\n >>> run = neptune.init()\n\n Pickle model object and upload it\n\n >>> run[\"results/pickled_model\"].upload(File.as_pickle(trained_model))\n\n You may also want to check `as_pickle docs page`_.\n\n .. _as_pickle docs page:\n https://docs.neptune.ai/api-reference/field-types#.as_pickle\n \"\"\"\n content = get_pickle_content(obj)\n return File.from_content(\n content if content is not None else b\"\", extension=\"pkl\"\n )\n\n @staticmethod\n def create_from(value) -> \"File\":\n if isinstance(value, str):\n return File(path=value)\n elif is_pil_image(value) or is_matplotlib_figure(value):\n return File.as_image(value)\n elif (\n is_plotly_figure(value) or is_altair_chart(value) or is_bokeh_figure(value)\n ):\n return File.as_html(value)\n elif is_numpy_array(value):\n raise TypeError(\n \"Value of type {} is not supported. Please use File.as_image().\".format(\n type(value)\n )\n )\n elif is_pandas_dataframe(value):\n raise TypeError(\n \"Value of type {} is not supported. Please use File.as_html().\".format(\n type(value)\n )\n )\n elif isinstance(value, File):\n return value\n raise TypeError(\"Value of type {} is not supported.\".format(type(value)))\n\n @staticmethod\n def is_convertable(value):\n return (\n is_pil_image(value)\n or is_matplotlib_figure(value)\n or is_plotly_figure(value)\n or is_altair_chart(value)\n or is_bokeh_figure(value)\n or is_numpy_array(value)\n or is_pandas_dataframe(value)\n or isinstance(value, File)\n )\n","repo_name":"TomaszOdrzygozdz/AdaSubS_colab","sub_path":"third_party/neptune-client/neptune/new/types/atoms/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":9915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"26865074857","text":"import cv2,os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_img(input_Path):\n img_paths = []\n for (path, dirs, files) in os.walk(input_Path):\n for filename in files:\n if filename.endswith(('.jpg','.png')):\n img_paths.append(path+'/'+filename)\n return img_paths\n\n\n# Build the bank of Gabor filters\ndef build_filters():\n filters = []\n ksize = [7,9,11,13,15,17] # 6 Gabor kernel sizes (scales)\n lamda = np.pi/2.0 # wavelength\n for theta in np.arange(0, np.pi, np.pi / 4): # 4 Gabor orientations: 0°, 45°, 90°, 135°\n for K in range(6):\n kern = cv2.getGaborKernel((ksize[K], ksize[K]), 1.0, theta, lamda, 0.5, 0, ktype=cv2.CV_32F)\n kern /= 1.5*kern.sum()\n filters.append(kern)\n plt.figure(1)\n\n # Plot the filter kernels\n for temp in range(len(filters)):\n plt.subplot(4, 6, temp + 1)\n plt.imshow(filters[temp])\n plt.show()\n return filters\n\n# Gabor feature extraction\ndef getGabor(img,filters):\n res = [] # filtered results\n for i in range(len(filters)):\n # res1 = process(img, filters[i])\n # filters[i] is a single 2-D Gabor kernel, so apply it directly;\n # iterating over it would filter with each kernel row separately\n accum = cv2.filter2D(img, cv2.CV_8UC1, filters[i])\n res.append(np.asarray(accum))\n\n # Plot the filtered images\n plt.figure(2)\n for temp in range(len(res)):\n plt.subplot(4,6,temp+1)\n plt.imshow(res[temp], cmap='gray' )\n plt.show()\n return res # return the 24 filtered images, ordered by Gabor orientation\n\n\nif __name__ == '__main__':\n img_path = os.path.join(os.getcwd(), 'img1.png')\n img = cv2.imread(img_path)\n filters = build_filters()\n getGabor(img, filters)\n","repo_name":"GuoxiW/CPMCM2019","sub_path":"code/question 2/gabor.py","file_name":"gabor.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
{"seq_id":"34019283188","text":"from Structures.Pair import Pair\nfrom Structures.ListPair import ListPair\nfrom Validator import Validator\nfrom Structures.Result import Result\n\n\nclass Little:\n\n INFINITY = 999999\n\n def __init__(self, matrix):\n self.validator = Validator()\n self.subject = matrix\n self.optima = Result('', self.INFINITY)\n self.matrixes = list()\n\n def run(self):\n self.validator.set_subject(self.subject.matrix)\n if not self.validator.validate():\n print('Validation error. Please check source file.\\n')\n return\n self.matrixes.append(self.subject.simplify())\n while self.matrixes:\n top = self.matrixes.pop()\n pair = self.find_division_pair(top)\n div = self.divide(top, pair)\n if div['first'].rate < self.optima.rate:\n self.matrixes.append(div['first'].simplify())\n if div['second'].is_ready() and div['second'].rate < self.optima.rate:\n div['second'] = div['second'].simplify()\n self.optima = Result(div['second'].harvest(), div['second'].rate)\n self.matrixes = [m for m in self.matrixes if m.rate < self.optima.rate]\n else:\n if div['second'].rate < self.optima.rate:\n self.matrixes.append(div['second'].simplify())\n return True\n\n def divide(self, matrix, pair):\n second = matrix.copy()\n second.check_cycles(pair)\n second.append_path(pair)\n second.remove_path(pair)\n matrix.matrix[pair.i][pair.j] = self.INFINITY\n return {'first': matrix, 'second': second}\n\n def find_division_pair(self, m):\n rates = ListPair()\n matrix = m.matrix\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if matrix[i][j] == 0:\n c1 = self.INFINITY\n c2 = self.INFINITY\n for k in range(len(matrix[i])):\n if c1 > matrix[i][k] and k != j:\n c1 = matrix[i][k]\n for z in range(len(matrix)):\n if c2 > matrix[z][j] and z != i:\n c2 = matrix[z][j]\n rates.append(Pair(i, j, c1 + c2))\n\n return rates.max()\n\n def print_result(self):\n print('Length: ' + str(self.optima.rate) + '\\n')\n print(self.optima.get_path_string())\n","repo_name":"swnsma/Little","sub_path":"Little.py","file_name":"Little.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13047230897","text":"from math import sin, cos, sqrt, atan2, radians\nimport time\nimport json\nimport sqlite3\n\nclass gps(object):\n def __init__(self):\n # Lap time calculation variables\n\n self.gate = None # Two GPS points that define a starting gate, if None lap times won't be calculated\n self.laps = [] # Values for each lap are stored as a dictionary in this list\n self.gps_current = [0,0] # Latest received GPS coordinate, used to check if path intersects starting gate\n self.gps_last = [0,0] # Second latest received GPS coordinate\n self.track = 0 # Primary key of track in sqlite database\n self.distance = 0 # Distance of current recording in kilometers, is incremented when GPS is received\n\n self.track_range = 
3 # The distance in km the client has to be from a track to auto-detect the track ID\n\n self.start_time = 0\n self.current_time = 0\n\n # Raw string so the backslashes in the Windows path aren't treated as escapes\n self.sqlite_path = r\"D:\dev\mission-control-github\db.sqlite3\"\n\n def gps_gather(self, data): # Collect GPS coordinates to calculate lap times\n if data[0] == 'ts/gps/longitudinal':\n self.gps_current[0] = float(data[1])\n\n if data[0] == 'ts/gps/lateral':\n self.gps_current[1] = float(data[1])\n\n both_received = (self.gps_current[0] != self.gps_last[0]) and (self.gps_current[1] != self.gps_last[1])\n\n if both_received:\n self.current_time = time.time() - self.start_time\n\n return both_received # returns True if both axes have been received\n\n def get_distance_traveled(self):\n if self.gps_last[0] != 0:\n self.distance += self.calc_distance(self.gps_current, self.gps_last)\n\n def calc_lap_time(self):\n # Check if GPS path intersects the starting line\n if self.gate is not None and self.gate != 0:\n\n if self.intersect(self.gps_current, self.gps_last, self.gate[0], self.gate[1]):\n\n # Get the lap number and append the latest lap\n i = len(self.laps)\n\n if i == 0:\n self.laps.append({\n 'id': i+1,\n 'total_time': round(self.current_time,2),\n 'distance': round(self.distance,3),\n 'lap_time': 0\n })\n\n else:\n time_delta = round(self.current_time - self.laps[i-1]['total_time'],2)\n\n self.laps.append({\n 'id': i+1,\n 'total_time': round(self.current_time,2),\n 'distance': round(self.distance,3),\n 'lap_time': time_delta\n })\n\n def set_start(self):\n self.start_time = time.time()\n self.gate = None\n self.laps = []\n self.gps_current = [0,0]\n self.gps_last = [0,0]\n self.track = 0\n self.distance = 0\n\n\n # Finds a track in the database from the GPS location\n # returns True if a track has been found\n def finde_track(self):\n status = False\n conn = sqlite3.connect(self.sqlite_path)\n c = conn.cursor()\n\n c.execute(\"select points from control_track\")\n nr_of_tracks = len(c.fetchall())\n #print(\"number of tracks in database\"+str(nr_of_tracks))\n\n for i in range(1, nr_of_tracks + 1): # track IDs in sqlite are 1-indexed\n c.execute(\"select points from telemetry_track where id=%i\" % i)\n\n try:\n gate = json.loads(c.fetchone()[0])['gate']\n distance = self.calc_distance(gate[0], self.gps_current)\n if distance < self.track_range:\n self.track = i\n self.gate = gate\n status = True\n print(\"Track found with ID: \" + str(self.track))\n break\n except Exception as e:\n print(\"Could not load gate points\")\n gate = None\n\n return status\n\n\n def get_track(self):\n return self.track\n\n def publish(self, client):\n gps_packet = {\n 'track': self.track,\n 'gps_lon': self.gps_current[0],\n 'gps_lat': self.gps_current[1],\n 'laps': self.laps\n }\n client.publish('control/gps',json.dumps(gps_packet))\n\n self.gps_last[0] = self.gps_current[0]\n self.gps_last[1] = self.gps_current[1]\n\n\n def calc_distance(self,x,y):\n dlon = radians(x[0]) - radians(y[0])\n dlat = radians(x[1]) - radians(y[1])\n\n # Calculate distance traveled with the haversine formula (Earth radius 6373 km);\n # the latitudes must be converted to radians before taking the cosine\n a = sin(dlat / 2)**2 + cos(radians(y[1])) * cos(radians(x[1])) * sin(dlon / 2)**2\n return 2 * 6373 * atan2(sqrt(a), sqrt(1 - a))\n\n # Takes in endpoints of two lines as lists [x,y] and checks if the lines intersect or not\n def intersect(self, a1, a2, b1, b2):\n def f(x,y):\n return (x-a1[0])*(a2[1]-a1[1])-(y-a1[1])*(a2[0]-a1[0])\n\n def g(x,y):\n return (x-b1[0])*(b2[1]-b1[1])-(y-b1[1])*(b2[0]-b1[0])\n\n u = f(b1[0], b1[1])*f(b2[0], b2[1])\n v = g(a1[0], a1[1])*g(a2[0], a2[1])\n\n return (u < 0 and v < 0)\n","repo_name":"Ingimarsson/mission-control","sub_path":"backend/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"39845661785","text":"import cv2\nimport numpy as np\nimport os\n\ndef captura_dados_face():\n\n cap= cv2.VideoCapture(0, cv2.CAP_DSHOW)\n cascata_de_rosto = cv2.CascadeClassifier(cv2.haarcascades+ \"haarcascade_frontalface_alt.xml\")\n # cascata_de_olho = cv2.CascadeClassifier(cv2.haarcascades+ \"haarcascade_eye.xml\")\n # cascata_de_sorriso = cv2.CascadeClassifier(cv2.haarcascades+ \"haarcascade_smile.xml\")\n skip = 0\n dados_de_rosto = []\n dataset_path = \"./dados_de_rosto/\"\n\n nome_do_arquivo = input(\"What is the name of this face?\")\n ret, frame = cap.read()\n if ret != False:\n frameShape = frame.shape\n mask = np.zeros((frameShape[0], frameShape[1], 3), dtype=np.uint8)\n while True:\n ret,frame = cap.read()\n\n frame_escala_cinza = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n if ret == False:\n continue\n rostos = cascata_de_rosto.detectMultiScale(frame_escala_cinza,1.5,5)\n k=1\n rostos = sorted(rostos,key =lambda x : x[2]*x[3], reverse=True)\n skip +=1\n for rosto in rostos:\n x,y,w,h = rosto\n offset = 5\n rosto_offset = frame_escala_cinza[y-offset:y+h+offset,x-offset:x+w+offset]\n try:\n selecao_de_rosto = cv2.resize(rosto_offset,(100,100))\n except Exception as e:\n # ignore images in which the face is too close to the border\n print(str(e))\n if skip % 5 == 0:\n dados_de_rosto.append(rosto_offset)\n print(len(dados_de_rosto))\n\n for i in range (y, y + h):\n for j in range (x, x+w):\n mask[i][j][:] = 255\n cv2.imshow('mask', mask)\n blurredImage = cv2.GaussianBlur(frame, (35,35), 0)\n blurredFace = np.where(mask==np.array([255,255,255]), blurredImage, frame)\n\n cv2.imshow(str(k),selecao_de_rosto)\n k+=1\n\n cv2.imshow(\"rostos_borrados\", blurredFace )\n\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)\n # olhos = cascata_de_olho.detectMultiScale(frame_escala_cinza,1.5,5)\n # for olho in olhos:\n # ox, oy, ow, oh = olho\n # cv2.rectangle(frame, (ox, oy), (ox + ow, oy + oh), (0, 255, 0), 2)\n\n # sorrisos = cascata_de_sorriso.detectMultiScale(frame_escala_cinza,2,5)\n # for sorriso in sorrisos:\n # sx, sy, sw, sh = sorriso\n # cv2.rectangle(frame, (sx, sy), (sx + sw, sy + sh), (0, 255, 0), 2)\n\n cv2.imshow(\"rostos\",frame)\n key_pressed = cv2.waitKey(1) & 0xFF\n if key_pressed == ord('q'):\n break\n dados_de_rosto = np.array(dados_de_rosto)\n dados_de_rosto = dados_de_rosto.reshape((dados_de_rosto.shape[0],-1))\n\n\n # construct the directory string\n\n # check the directory does not exist\n if not (os.path.exists(dataset_path)):\n # create the directory you want to save to\n os.mkdir(dataset_path)\n\n ds = {\"ORE_MAX_GIORNATA\": 5}\n\n np.save(dataset_path + nome_do_arquivo, dados_de_rosto)\n\n cap.release()\n cv2.destroyAllWindows()","repo_name":"vinikuia/pdi","sub_path":"dados_de_rosto.py","file_name":"dados_de_rosto.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"30468179398","text":"from typing import List\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n \n \n maximum = [0] * len(prices)\n maximum[-1] = prices[-1]\n \n for index in range(len(prices) - 2, -1 , -1):\n maximum[index] = max(prices[index], maximum[index + 1])\n \n \n answer = 0\n minimum = prices[0]\n for index in range(1, len(prices)):\n \n answer 
= max(maximum[index] - minimum, answer)\n minimum = min(minimum, prices[index])\n \n return answer\n \n ","repo_name":"Olyadtemesgen/Competitive-Programming","sub_path":"0121-best-time-to-buy-and-sell-stock/0121-best-time-to-buy-and-sell-stock.py","file_name":"0121-best-time-to-buy-and-sell-stock.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"25570961677","text":"from gameTypes import *\nimport pyglet\nfrom util import *\nfrom pygletUtil import *\nfrom level import Level\n\nclass Map:\n blocksAnims = {} # Block animations used to build the sprites\n # @@ Block image names by type. (move this somewhere else?)\n blockImgNames = {BlockType.ruby : ('block1anim/ruby', 7),\n BlockType.pearl: ('block6anim/pearl', 7),\n BlockType.emerald: ('block5anim/emerald', 7)}\n levelsNames = ['lv0.lev'] # level file names\n levels = [] # the levels themselves, in the format [(blockType, x, y),...]\n levelsCoords = [] # coordinates of the level icons on the map\n levelsCaps = [] # sprites of the level icons on the map\n levelsDone = [] # completed levels\n currentLevel = None\n blockWindowTopStart = 500\n blockWindowLeft = 500\n background = None\n capRadius = 30\n\n def __init__(self, width, height, mapname):\n self.blockWindowTopStart = height\n self.blockWindowLeft = width\n self.blocksAnims = dict()\n self.levels = list()\n self.levelsCoords = list()\n self.levelsCaps = list()\n self.levelsDone = list() # List of completed levels ! needs to be loaded\n self.loadMap(mapname)\n self.loadBlockImages()\n self.loadLevels()\n\n def loadMap(self, mapname):\n with open(mapname, 'rt') as fin:\n self.background = fin.readline()[:-1] #without \\n\n levelDir = fin.readline()[:-1]\n levelCap = pyglet.image.load('levelCap.png')\n center_image(levelCap)\n #Load Levels Coords\n for l in fin:\n l = l.split()\n x = int(l[0])\n y = int(l[1])\n self.levelsCoords += [(x, y)]\n self.levelsCaps += [Sprite(levelCap, x=x, y=y)]\n #print(self.background, levelDir, self.levelsCoords)\n #load levels list\n self.levelsNames = list()\n from os import listdir\n from os.path import isfile, join\n self.levelsNames = [levelDir + f for f in listdir(levelDir) if isfile(join(levelDir, f))]\n self.levelsNames = list(filter(lambda x: x.find('.lev') > -1, self.levelsNames))\n self.levelsNames.sort()\n #load bg\n self.background = Sprite(pyglet.image.load(self.background))\n\n def loadLevels(self):\n \"\"\" Load all levels of the map. \"\"\"\n for name in self.levelsNames:\n lev = Level(name, self.blockWindowLeft, self.blockWindowTopStart)\n self.levels += [lev]\n lev.loadBlocks(self)\n self.currentLevel = self.levels[0]\n\n def loadBlockImages(self):\n \"\"\" Load the block images. 
\"\"\"\n for (b, (imgBaseName, framesCount)) in self.blockImgNames.items():\n frames = list()\n for i in range(1, framesCount+1):\n img = pyglet.image.load(imgBaseName + str(i) + '.png')\n center_image(img)\n frames += [pyglet.image.AnimationFrame(img, 0.2)]\n anim = pyglet.image.Animation(frames)\n self.blocksAnims[b] = anim\n self.levels = list()\n\n def getBlockAnim(self, btype):\n return self.blocksAnims[btype]\n\n def getDrawables(self):\n return self.currentLevel.getDrawables()\n\n def draw(self):\n self.background.draw()\n for x in self.levelsCaps:\n x.draw()\n\n def loadLevel(self, n):\n self.currentLevel = self.levels[n]\n","repo_name":"anokata/pythonPetProjects","sub_path":"pong/modules/Mmap.py","file_name":"Mmap.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"30559343410","text":"\"\"\"\nFunctions:\n - transs\n - calc_GCF\n\n\"\"\"\nimport numpy\nimport math\n\n\ndef transs(lmax: int, nn: int, zeta: float, sthovl: numpy.ndarray):\n \"\"\"\n Calculate integral.\n\n $$\n ( r**nn * exp(-zeta r) * j_l(Qr) r**2 dr) / Q**l\n $$\n\n for given l\n\n Q = 4 pi sint / lambda\n\n \"\"\"\n Q = 4. * math.pi * sthovl\n a = [0.*Q for h in range(17)]\n ff = numpy.zeros(shape=(sthovl.size, lmax+1), dtype=float)\n d = Q**2+zeta**2\n a[0] = 0.0*Q\n a[1] = 1./d\n n = nn-1\n tz = 2.*zeta\n ts = 4.*math.pi\n for l in range(lmax+1):\n ll = l+1\n if (ll != 1):\n a[ll] = a[ll-1] * ts * l * 1./d\n a[ll-1] = 0.0*Q\n for nx in range(ll, n+1):\n i1 = nx\n i2 = nx+1\n i3 = i2+1\n a[i3-1] = (tz*nx*a[i2-1] - (nx+l)*(nx-ll)*a[i1-1]) * 1./d\n ff[:, ll-1] = a[i3-1]\n return ff\n\ndef calc_GCF(ln1: list, ldzeta1: list, lcoeff1: list, kappa1: float,\n ln2: list, ldzeta2: list, lcoeff2: list, kappa2: float,\n sthovl: float, lmax: float):\n GCF = numpy.zeros(shape=(sthovl.size, lmax+1), dtype=float)\n for n1, dzeta1, coeff1 in zip(ln1, ldzeta1, lcoeff1):\n # transformation from atomic units to inverse angstrems\n zetaA1 = dzeta1*kappa1/0.529177 \n Norm1 = math.sqrt(((2.*zetaA1)**(2*n1+1))/math.factorial(2*n1))\n for n2, dzeta2, coeff2 in zip(ln2, ldzeta2, lcoeff2):\n # transformation from atomic units to inverse angstrems\n zetaA2 = dzeta2*kappa2/0.529177\n Norm2 = math.sqrt(((2.*zetaA2)**(2*n2+1))/math.factorial(2*n2))\n\n nn, zeta = (n1+n2), (zetaA1+zetaA2)\n GCF += coeff1*coeff2*Norm1*Norm2*transs(lmax, nn, zeta, sthovl)\n return GCF\n\n","repo_name":"bsramosl/ProsPy","sub_path":"venv/Lib/site-packages/cryspy/A_functions_base/function_1_rhocif.py","file_name":"function_1_rhocif.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72233996713","text":"import torch\nimport tqdm, gc, time\nfrom sklearn.metrics import roc_auc_score, log_loss\nfrom torch.utils.data import DataLoader\nfrom models.emb_MLPs import *\n\nfrom dataset import AvazuDataset, Movielens1MDataset, CriteoDataset\n\n\ndef get_dataset(name, path):\n if name == 'movielens1M' or name == 'movielens1M_inter':\n return Movielens1MDataset(path)\n elif name == 'avazu':\n return AvazuDataset(path)\n elif name == 'criteo':\n return CriteoDataset(path)\n\n\ndef get_model(name,args):\n if name == 'MLP':\n return select_MLP(args)\n\n\nclass EarlyStopper(object):\n\n def __init__(self, num_trials, save_path):\n self.num_trials = num_trials\n self.trial_counter = 0\n self.best_accuracy = 0\n self.save_path = save_path\n\n def is_continuable(self, model, 
accuracy):\n if accuracy > self.best_accuracy:\n self.best_accuracy = accuracy\n self.trial_counter = 0\n if model.stage == 1:\n model.desicion = decide(model)\n torch.save({'state_dict': model.state_dict()}, self.save_path) # torch.save(model, self.save_path)\n return True\n elif self.trial_counter + 1 < self.num_trials:\n self.trial_counter += 1\n return True\n else:\n return False\n # Load model: model = describe_model(); checkpoint = torch.load('checkpoint.pth.tar'); model.load_state_dict(checkpoint['state_dict'])\n\n\ndef train(model, optimizer, optimizer_model, optimizer_darts, train_data_loader, valid_data_loader, criterion, device, log_interval, AutoML, darts_frequency):\n model.train()\n total_loss = 0\n tk0 = tqdm.tqdm(train_data_loader, smoothing=0, mininterval=1.0)\n valid_data_loader_iter = iter(valid_data_loader)\n #min_space_ratio, best_decision = 1.0, []\n # val_fields, val_target = [], []\n for i, (fields, target) in enumerate(tk0):\n # if model.stage == 1: val_fields.append(fields); val_target.append(target)\n fields, target = fields.to(device), target.to(device)\n y = model(fields)\n loss = criterion(y, target.float())\n model.zero_grad()\n loss.backward()\n # for layer_name, param in model.named_parameters(): print('0', layer_name, param[0])\n\n # supervised training, jointly update main RS network and Darts weights\n if not AutoML:\n optimizer.step()\n\n # pretrain/test stage, only update main RS network\n if AutoML and model.stage in [0, 2]:\n optimizer_model.step()\n\n # search stage, alternatively update main RS network and Darts weights\n if AutoML and model.stage == 1:\n optimizer_model.step()\n if (i + 1) % darts_frequency == 0:\n # fields, target = torch.cat(val_fields, 0), torch.cat(val_target, 0); val_fields, val_target = [], []\n try:\n fields, target = next(valid_data_loader_iter)\n except StopIteration:\n del valid_data_loader_iter\n gc.collect()\n valid_data_loader_iter = iter(valid_data_loader)\n fields, target = next(valid_data_loader_iter)\n fields, target = fields.to(device), target.to(device)\n y = model(fields)\n loss_val = criterion(y, target.float())\n\n model.zero_grad()\n loss_val.backward()\n optimizer_darts.step()\n\n # if ((i + 1) % (darts_frequency * 50) == 0 or i == len(tk0) - 1) and model.stage == 1:\n # decision = decide(model)\n # if ratio < min_space_ratio:\n # min_space_ratio = ratio\n # best_decision = decision\n\n total_loss += loss.item()\n if (i + 1) % log_interval == 0:\n tk0.set_postfix(loss=total_loss / log_interval)\n total_loss = 0\n # if i > 100: break\n\n\n\ndef test(model, data_loader, device):\n model.eval()\n targets, predicts, infer_time = list(), list(), list()\n with torch.no_grad():\n for fields, target in tqdm.tqdm(data_loader, smoothing=0, mininterval=1.0):\n fields, target = fields.to(device), target.to(device)\n start = time.time()\n y = model(fields)\n infer_cost = time.time() - start\n targets.extend(target.tolist())\n predicts.extend(y.tolist())\n infer_time.append(infer_cost)\n return roc_auc_score(targets, predicts), log_loss(targets, predicts), sum(infer_time)\n\n\n\ndef decide(model):\n dec_f = model.dec_f\n if dec_f == 0:\n p = torch.softmax(model.weights.deep_weights,dim=0)\n # _, index = p.sort(dim=0,descending=True)\n # index = index.detach().cpu().numpy()\n # decision = [0] * len(index)\n # for i in index[:11]:\n # decision[i] =1\n base = p[-1]\n decision = []\n for i in p[:-1]:\n if i > base:\n decision.append(1)\n else:\n decision.append(0)\n if dec_f == 1:\n # SoftMAX selection with drop n\n _, index 
= torch.softmax(model.weights.deep_weights,dim=0)[:,-1].sort(dim=0,descending=True)\n index = index.detach().cpu().numpy()\n decision = [0] * len(index)\n num = 11#int(len(index)/2) # 8,10,11 is not ok for criteo \n for i in index[:num]:\n decision[i] =1\n if dec_f == 2:\n # Hard Selection\n decision = torch.argmax(model.weights.deep_weights, dim=1).detach().cpu().numpy()\n return decision\n\ndef main(dataset_name,\n dataset_path,\n model_name,\n args,\n epoch,\n learning_rate,\n learning_rate_darts,\n batch_size,\n AutoML,\n darts_frequency,\n weight_decay,\n device,\n save_dir):\n device = torch.device(device)\n dataset = get_dataset(dataset_name, dataset_path)\n train_length = int(len(dataset) * 0.8)\n valid_length = int(len(dataset) * 0.1)\n test_length = len(dataset) - train_length - valid_length\n train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(\n dataset, (train_length, valid_length, test_length), generator=torch.Generator().manual_seed(42))\n\n train_data_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8)\n valid_data_loader = DataLoader(valid_dataset, batch_size=batch_size*2, num_workers=8)\n test_data_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=8)\n\n model = get_model(model_name, args).to(device)\n criterion = torch.nn.BCELoss()\n optimizer = torch.optim.Adam(params=model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n optimizer_model = torch.optim.Adam(params=[param for name, param in model.named_parameters() if name != 'weights.deep_weights'], lr=learning_rate, weight_decay=weight_decay)\n optimizer_darts = torch.optim.Adam(params=[param for name, param in model.named_parameters() if name == 'weights.deep_weights'], lr=learning_rate_darts, weight_decay=weight_decay)\n\n # print('\\n*********************************************Pretrain*********************************************\\n')\n # model.stage = 0\n # early_stopper = EarlyStopper(num_trials=3, save_path=f'{save_dir}/{model_name}:{model.stage}.pt')\n # for epoch_i in range(epoch[0]):\n # print('Pretrain epoch:', epoch_i)\n # train(model, optimizer, optimizer_model, optimizer_darts, train_data_loader, valid_data_loader, criterion, device, 100, AutoML, darts_frequency)\n\n # auc, logloss,infer_time = test(model, valid_data_loader, device)\n # if not early_stopper.is_continuable(model, auc):\n # print(f'validation: best auc: {early_stopper.best_accuracy}')\n # break\n # print('Pretrain epoch:', epoch_i, 'validation: auc:', auc, 'logloss:', logloss)\n\n # auc, logloss,infer_time = test(model, test_data_loader, device)\n # print(f'Pretrain test auc: {auc} logloss: {logloss}, infer time:{infer_time}\\n')\n\n print('\\n********************************************* Search *********************************************\\n')\n model.stage = 1\n early_stopper = EarlyStopper(num_trials=3, save_path=f'{save_dir}/{model_name}:{model.stage}.pt')\n for epoch_i in range(epoch[1]):\n print('Search epoch:', epoch_i)\n train(model, optimizer, optimizer_model, optimizer_darts, train_data_loader, valid_data_loader, criterion, device, 100, AutoML, darts_frequency)\n auc, logloss,_ = test(model, valid_data_loader, device)\n print(model.weights.deep_weights)\n print(f'decision = {decide(model)}')\n if auc > early_stopper.best_accuracy:decision_best = decide(model)\n if not early_stopper.is_continuable(model, auc):\n print(f'validation: best auc: {early_stopper.best_accuracy}')\n print(f'decision = {decision_best}')\n break\n \n print('Search epoch:', epoch_i, 
'validation: auc:', auc, 'logloss:', logloss)\n\n # if epoch_i > 0 and ratio < min_space_ratio: # epoch 0 is not stable\n # min_space_ratio, best_decision = ratio, decision\n # print(f'min_space_ratio = {min_space_ratio}, best_decision = {best_decision}')\n \n\n auc, logloss, _ = test(model, test_data_loader, device)\n print(f'Search test auc: {auc} logloss: {logloss}\\n')\n \n #params = sum(np.array(decision) * args.field_dims)/sum(args.field_dims)\n #print(params)\n print('\\n********************************************* Test *********************************************\\n')\n time_retrain = time.time()\n model.stage = 2\n model = MLP(args,decision_best).to(device)\n optimizer_model = torch.optim.Adam(params=model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n early_stopper = EarlyStopper(num_trials=3, save_path=f'{save_dir}/{model_name}_test.pt')\n for epoch_i in range(epoch[2]):\n print('Test epoch:', epoch_i)\n train(model, optimizer, optimizer_model, optimizer_darts, train_data_loader, valid_data_loader, criterion, device, 100, AutoML, darts_frequency)\n auc, logloss, infer_time = test(model, valid_data_loader, device)\n \n print('Test epoch:', epoch_i, 'validation: auc:', auc, 'logloss:', logloss,'infer time:', infer_time)\n\n if not early_stopper.is_continuable(model, auc):\n print(f'Test validation: best auc: {early_stopper.best_accuracy}')\n break\n\n auc, logloss,infer_time = test(model, test_data_loader, device)\n print(f'Test test auc: {auc} logloss: {logloss} infer time: {infer_time}\\n ')\n print(model.decision)\n print(f'Retrain Time:{time.time()-time_retrain}')\n with open('Record/%s.txt'%dataset_name, 'a') as the_file:\n the_file.write('Dataset:%s\\nRetrain Time:%.2f,Retrain Epoches: %d\\n Decision:%s,\\ntest auc:%.8f,logloss:%.8f, infer time:%.8f\\n'\n %(dataset_name,(time.time()-time_retrain)/60, epoch_i+1, str(model.decision),auc,logloss,infer_time))\n\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_name', default='movielens1M',\n help='criteo, avazu, movielens1M, movielens20M, movielens1Mfield_noBinary, movielens1Mfield_noGenres, movielens1Mfield_all')\n parser.add_argument('--dataset_path', default='ml-1m/train.txt',\n help='criteo/train.txt, avazu/train, ml-1m/ratings.dat, ml-20m/rating.csv, ml-1m/train_field_no_binary.txt, ml-1m/train_field_no_genres.txt, ml-1m/train_field_all.txt')\n parser.add_argument('--model_name', default='MLP', help='lr, fm, hofm, ffm, fnn, wd, ipnn, opnn, dcn, fnfm, dfm, xdfm, afm, afi, afn, nfm, ncf')\n parser.add_argument('--mlp_dims', type=int, default=[16,8], help='original=16')\n parser.add_argument('--embed_dim', type=int, default=16, help='original=16')\n parser.add_argument('--softmax_type', type=int, default=0, help='0 softmax; 1 softmax+temperature; 2 gumbel-softmax')\n parser.add_argument('--epoch', type=int, default=[0,8,50], nargs='+', help='pretrain/search/test epochs')\n parser.add_argument('--learning_rate', type=float, default=0.001)\n parser.add_argument('--learning_rate_darts', type=float, default=0.001)\n parser.add_argument('--batch_size', type=int, default=2048)\n parser.add_argument('--AutoML', type=bool, default=True)\n parser.add_argument('--darts_frequency', type=int, default=10)\n parser.add_argument('--weight_decay', type=float, default=1e-6)\n parser.add_argument('--dropout',type=int, default=0.2)\n parser.add_argument('--device', default='cuda:0' if torch.cuda.is_available() else 'cpu', help='cuda:0')\n 
parser.add_argument('--save_dir', default='chkpt')\n parser.add_argument('--decide_f',default=1, help='Decide funtion to use. 1: Hand select, 2: auto select by argmax')\n parser.add_argument('--add_zero',default=False, help='Whether to add a useless feature')\n args = parser.parse_args()\n\n if args.dataset_name == 'criteo': dataset_path = 'criteo/train.txt'\n if args.dataset_name == 'avazu': dataset_path = 'avazu/train'\n if args.dataset_name == 'movielens1M': dataset_path = 'ml-1m/train.txt'\n if args.dataset_name == 'movielens1M_inter': dataset_path = 'ml-1m/train_inter_part_noid.txt'\n if args.dataset_name == 'movielens1M':\n args.field_dims = [3706,301,81,6040,21,7,2,3402]\n elif args.dataset_name == 'movielens1M_inter':\n args.field_dims = [3706, 301, 81, 6040, 21, 7, 2, 3402, 1524, 6100, 2076, 601, 1648, 561, 162,134, 42, 5349, 14, 4794, 4079]\n #[3706, 301, 81, 6040, 21, 7, 2, 3402, 59112, 22457, 7152, 1524, 6100, 2076, 601, 268050, 1648, 561, 162,130340,134, 42, 5349, 14, 4794, 4079]\n #[3706,301,81,6040,21,7,2,3402,3706,3706,1000209,59112,22457,7152,871096,1524,352172,6100,2076,601,268050,195382,1648,561,162,130340,6040,6040,6040,6040,134,42,5349,14,4794,4079]\n elif args.dataset_name == 'avazu':\n args.field_dims = [241, 8, 8, 3697, 4614, 25, 5481, 329, \n 31, 381763, 1611748, 6793, 6, 5, 2509, 9, 10, 432, 5, 68, 169, 61]\n elif args.dataset_name == 'criteo':\n args.field_dims = [ 49, 101, 126, 45, 223, 118, 84, 76,\n 95, 9, 30, 40, 75, 1458, 555, 193949,\n 138801, 306, 19, 11970, 634, 4, 42646, 5178,\n 192773, 3175, 27, 11422, 181075, 11, 4654, 2032,\n 5, 189657, 18, 16, 59697, 86, 45571]\n if args.add_zero:\n args.field_dims.append(0)\n print(f'\\ndataset_name = {args.dataset_name},\\t',\n f'dataset_path = {dataset_path},\\t',\n f'model_name = {args.model_name},\\t',\n f'mlp_dim = {args.mlp_dims},\\t',\n f'softmax_type = {args.softmax_type},\\t',\n f'epoch = {args.epoch},\\t',\n f'learning_rate = {args.learning_rate},\\t',\n f'learning_rate_darts = {args.learning_rate_darts},\\t',\n f'batch_size = {args.batch_size},\\t',\n f'AutoML = {args.AutoML},\\t',\n f'darts_frequency = {args.darts_frequency},\\t',\n f'weight_decay = {args.weight_decay},\\t',\n f'device = {args.device},\\t',\n f'save_dir = {args.save_dir}\\n')\n for i in range(10):\n time_start = time.time()\n main(args.dataset_name,\n dataset_path,\n args.model_name,\n args,\n args.epoch,\n args.learning_rate,\n args.learning_rate_darts,\n args.batch_size,\n args.AutoML,\n args.darts_frequency,\n args.weight_decay,\n args.device,\n args.save_dir)\n\n print(f'\\ndataset_name = {args.dataset_name},\\t',\n f'dataset_path = {dataset_path},\\t',\n f'model_name = {args.model_name},\\t',\n f'mlp_dim = {args.mlp_dims},\\t',\n f'softmax_type = {args.softmax_type},\\t',\n f'epoch = {args.epoch},\\t',\n f'learning_rate = {args.learning_rate},\\t',\n f'learning_rate_darts = {args.learning_rate_darts},\\t',\n f'batch_size = {args.batch_size},\\t',\n f'AutoML = {args.AutoML},\\t',\n f'darts_frequency = {args.darts_frequency},\\t',\n f'weight_decay = {args.weight_decay},\\t',\n f'device = {args.device},\\t',\n f'save_dir = {args.save_dir},\\t',\n f'training time = {(time.time() - time_start) / 3600}\\n')","repo_name":"Dave-AdamsWANG/AutoField","sub_path":"main_trian.py","file_name":"main_trian.py","file_ext":"py","file_size_in_byte":16471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30451931705","text":"from django.conf.urls import url\n\nfrom wagtail.core import 
hooks\nfrom wagtail.api.v2.router import WagtailAPIRouter\nfrom wagtail.api.v2.views import PagesAPIViewSet\nfrom wagtail.api.v2.utils import BadRequestError, parse_fields_parameter\n\n\nclass GemPagesAdminApi(PagesAPIViewSet):\n def get_queryset(self):\n self.queryset = super(GemPagesAdminApi, self).get_queryset()\n return self.queryset\n\n def get_serializer_class(self):\n if self.action == 'listing_view':\n show_details = False\n model = getattr(self, 'queryset', self.get_queryset()).model\n else:\n # Allow \"detail_only\" (eg parent) fields on detail view\n show_details = True\n model = type(self.get_object())\n\n # Fields\n try:\n fields_config = parse_fields_parameter(\n self.request.GET.get('fields', []))\n except ValueError as e:\n raise BadRequestError(\"fields error: %s\" % str(e))\n\n return self._get_serializer_class(\n self.request.wagtailapi_router,\n model, fields_config, show_details=show_details\n )\n\n\nadmin_api = WagtailAPIRouter('gem_wagtailadmin_api_v1')\nadmin_api.register_endpoint('pages', GemPagesAdminApi)\n\nfor fn in hooks.get_hooks('construct_admin_api'):\n fn(admin_api)\n\nurlpatterns = [\n url(r'^v2beta/', admin_api.urls),\n]\n","repo_name":"praekeltfoundation/molo-gem","sub_path":"gem/admin_api.py","file_name":"admin_api.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"21356996064","text":"\"\"\" Desafio 060\nFaça um programa que leia um número qualquer e mostre o seu fatorial. Ex.: 5! = 5x4x3x2x1=120. Fazer com while e for \"\"\"\n\n# Solução 1: Usando while\n\nn = int(input('Informe um número: '))\n\nnum = n\nfat = 1\n\nwhile n != 1:\n fat *= n\n n -= 1\n\nprint('O fatorial de {}! é {}'.format(num, fat))\n\n# Solução 2: Usando for\n\nn = int(input('Informe um número: '))\nfat = 1\n\nfor c in range(n, 1, -1):\n fat *= c\nprint('O fatorial de {}! 
é {}'.format(n, fat))\n\n# Solução 3: Usando modulo, disponibilidado pelo Curso em Video\n\nfrom math import factorial\n\nn = int(input('Digite um número para calcular seu fatorial: '))\nf = factorial(n)\nprint('O fatorial de {} é {}.'.format(n, f))\n","repo_name":"nataliaqsoares/Curso-em-Video","sub_path":"Mundo 02/desafio060 - Calculo do fatorial.py","file_name":"desafio060 - Calculo do fatorial.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42416330878","text":"from settings import *\nfrom mongoengine import *\nconnect(\"paste2image\",host=DB_HOST,port=DB_PORT)\n\nfrom datetime import datetime\nimport random\n\ncodebase = 'abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\ndef random_string(length=6):\n s = []\n for _ in range(length):\n s.append( random.choice(codebase) )\n return ''.join(s)\n\nclass Pasted(Document):\n pid = StringField(max_length=8)\n content = StringField(max_length=65536)\n created = DateTimeField(default=datetime.now)\n image = FileField()\n def __init__(self, *args, **kwargs):\n super(Pasted, self).__init__(*args, **kwargs)\n if not self.pid:\n while True:\n self.pid = random_string(length=6)\n try:\n Pasted.objects.get(pid=self.pid)\n except Pasted.DoesNotExist:\n break\n self.save()\n\n\n","repo_name":"observerss/paste2image","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"25282367807","text":"import json\n\nfrom .headers import Headers, make_headers\nfrom .types import HeadersType\nfrom .utils import import_from_string\n\n\nclass Config:\n \"\"\"Baguette application configuration.\n\n Keyword Arguments\n -----------------\n debug : :class:`bool`\n Whether to run the application in debug mode.\n Default: ``False``.\n\n default_headers : :class:`list` of ``(str, str)`` tuples, \\\n :class:`dict` or :class:`Headers`\n Default headers to include in every response.\n Default: No headers.\n\n static_url_path : :class:`str`\n URL path for the static file handler.\n Default: ``\"static\"``.\n\n static_directory : :class:`str`\n Path to the folder containing static files.\n Default: ``\"static\"``.\n\n templates_directory : :class:`str`\n Path to the folder containing the HTML templates.\n Default: ``\"templates\"``.\n\n error_response_type : :class:`str`\n Type of response to use in case of error.\n One of: ``\"plain\"``, ``\"json\"``, ``\"html\"``.\n Default: ``\"plain\"``.\n\n error_include_description : :class:`bool`\n Whether to include the error description in the response\n in case of error.\n If debug is ``True``, this will also be ``True``.\n Default: ``True``.\n\n\n Attributes\n ----------\n debug : :class:`bool`\n Whether the application is running in debug mode.\n\n default_headers : :class:`Headers`\n Default headers included in every response.\n\n static_url_path : :class:`str`\n URL path for the static file handler.\n\n static_directory : :class:`str`\n Path to the folder containing static files.\n\n templates_directory : :class:`str`\n Path to the folder containing the HTML templates.\n\n error_response_type : :class:`str`\n Type of response to use in case of error.\n One of: ``\"plain\"``, ``\"json\"``, ``\"html\"``\n\n error_include_description : :class:`bool`\n Whether the error description is included in the response\n in case of error.\n If debug is ``True``, this will also be 
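
The `random_string` helper and the retry loop in `Pasted.__init__` further up implement collision-checked short IDs. The same idea can be sketched with the standard library's `secrets` module, which is preferable to `random` for identifiers; the `taken` set here is a stand-in for the database lookup:

import secrets
import string

ALPHABET = string.ascii_letters + string.digits

def unique_short_id(taken, length=6):
    # retry until we draw an id that is not already in use
    while True:
        candidate = "".join(secrets.choice(ALPHABET) for _ in range(length))
        if candidate not in taken:
            return candidate

print(unique_short_id({"abc123"}))
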
``True``.\n \"\"\"\n\n def __init__(\n self,\n *,\n debug: bool = False,\n default_headers: HeadersType = None,\n static_url_path: str = \"static\",\n static_directory: str = \"static\",\n templates_directory: str = \"static\",\n error_response_type: str = \"plain\",\n error_include_description: bool = True,\n ):\n self.debug = debug\n self.default_headers: Headers = make_headers(default_headers)\n\n self.static_url_path = static_url_path\n self.static_directory = static_directory\n self.templates_directory = templates_directory\n\n if error_response_type not in (\"plain\", \"json\", \"html\"):\n raise ValueError(\n \"Bad response type. Must be one of: 'plain', 'json', 'html'\"\n )\n self.error_response_type = error_response_type\n self.error_include_description = error_include_description or self.debug\n\n @classmethod\n def from_json(cls, filename: str) -> \"Config\":\n \"\"\"Loads the configuration from a JSON file.\n\n Arguments\n ---------\n filename : :class:`str`\n The file name of the JSON config file.\n\n Returns\n -------\n :class:`Config`\n The loaded configuration.\n \"\"\"\n\n with open(filename) as config_file:\n config = json.load(config_file)\n\n if not isinstance(config, dict):\n raise ValueError(\n f\"Configuration must be a dictionary. Got: {type(config)}\"\n )\n\n return cls(**config)\n\n @classmethod\n def from_class(cls, class_or_module_name) -> \"Config\":\n \"\"\"Loads the configuration from a python class.\n\n Arguments\n ---------\n class_or_module_name : class or :class:`str`\n The class to load the configuration.\n\n Returns\n -------\n :class:`Config`\n The loaded configuration.\n \"\"\"\n\n if isinstance(class_or_module_name, str):\n config_class = import_from_string(class_or_module_name)\n else:\n config_class = class_or_module_name\n\n config = {}\n for attribute in (\n \"debug\",\n \"default_headers\",\n \"static_url_path\",\n \"static_directory\",\n \"templates_directory\",\n \"error_response_type\",\n \"error_include_description\",\n ):\n if hasattr(config_class, attribute):\n config[attribute] = getattr(config_class, attribute)\n elif hasattr(config_class, attribute.upper()):\n config[attribute] = getattr(config_class, attribute.upper())\n\n return cls(**config)\n","repo_name":"takos22/baguette","sub_path":"baguette/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"12628271372","text":"# 导入相关的库\nimport matplotlib\nimport jieba\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\n# 加载并处理分词\ntext = open(r\"C:\\Users\\QDM\\Desktop\\SAP出库价\\9-07\\ciyun.txt\",\n encoding=\"gbk\").read()\ntext = text.replace('\\n', \"\").replace(\"\\u3000\", \"\")\ntext_cut = jieba.lcut(text)\ntext_cut = ' '.join(text_cut)\n\nstop_words = open(r\"C:\\Users\\QDM\\Desktop\\SAP出库价\\9-07\\ciyun.txt\",\n encoding=\"gbk\").read().split(\"\\n\")\n\n# 读取背景图片,也可以输入中文\nbackground = Image.open(r\"C:\\Users\\QDM\\Desktop\\fu.png\")\ngraph = np.array(background)\n\nword_cloud = WordCloud(font_path=\"simsun.ttc\",\n background_color=\"white\",\n mask=graph, # 指定词云的形状\n stopwords=stop_words)\n\n# 渲染效果\nword_cloud.generate(text_cut)\nplt.subplots(figsize=(12, 8))\nplt.imshow(word_cloud)\nplt.axis(\"off\")\n","repo_name":"suika3046/project01","sub_path":"2.Java Script/01 
JavaScript基础/第4天/code/词云图.py","file_name":"词云图.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18715356795","text":"#!/usr/bin/env python\nimport pathlib\nimport time\nimport rfid\nimport mqtt\n\nrelative_path = str(pathlib.Path(__file__).parent.resolve()) + \"/\"\n\ndef arrayFromFile(filename):\n array = []\n with open(filename) as file:\n for line in file:\n line = line.rstrip().lstrip()\n if line != \"\":\n array.append(line)\n return array\n\nwhitelist = []\nblacklist = []\n\ndef on_card_read(id, text):\n print(id)\n if str(id) in blacklist:\n on_blacklisted_card(id, text)\n elif str(id) in whitelist:\n on_whitelisted_card(id, text)\n else:\n on_unlisted_card(id, text)\n time.sleep(3)\n mqtt.clear_level()\n\ndef on_whitelisted_card(id, text):\n mqtt.on_level_2()\n\ndef on_blacklisted_card(id, text):\n mqtt.on_level_0()\n\ndef on_unlisted_card(id, text):\n mqtt.publish_id(id)\n mqtt.on_level_1()\n\nif __name__ == '__main__': \n mqtt.start()\n try:\n whitelist = arrayFromFile(relative_path + \"whitelist\")\n blacklist = arrayFromFile(relative_path + \"blacklist\")\n\n print(\"Starting reader\")\n while True:\n rfid.readCard(on_card_read)\n time.sleep(2)\n except Exception as e:\n print(e)\n mqtt.stop()\n exit()","repo_name":"DanielGrenehed/OELC","sub_path":"RFID_Publisher/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39848507984","text":"from collections import OrderedDict\nfrom inspect import Parameter, signature\nfrom typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, Type, Union\n\nimport apsw\n\nfrom irobot.precache.db._types import Adaptor, Convertor, SQLite\nfrom irobot.precache.db._udf import AggregateUDF, aggregate_udf_factory_factory\n\n# Type aliases\n_PyBindings = Union[Tuple[Any, ...], Dict[str, Any]]\n_SQLiteBindings = Union[Tuple[SQLite, ...], Dict[str, SQLite]]\n\n_Adaptors = Dict[Type, Adaptor]\n_Convertors = Dict[str, Convertor]\n\n\nclass Cursor(Iterator):\n \"\"\"\n Cursor implementation that adds adaptor and convertor support to the\n default APSW cursor\n \"\"\"\n def __init__(self, native_cursor: \"apsw.Cursor\") -> None:\n \"\"\"\n Constructor\n \n @param native_cursor APSW Cursor\n \"\"\"\n self._cursor = native_cursor\n\n # Get adaptors and convertors from parent connection\n conn = self._cursor.getconnection()\n self._adaptors = conn._adaptors\n self._convertors = conn._convertors\n\n def __iter__(self) -> \"Cursor\":\n return self\n\n def __next__(self) -> Tuple:\n \"\"\"\n Fetch the next row of data from the cursor and convert values\n for any matching SQLite declarations\n\n @return Row of data\n \"\"\"\n data = next(self._cursor)\n desc = self._cursor.getdescription()\n\n return tuple(\n self._convertors.get(type_decl, lambda x: x)(value)\n for value, (_col_name, type_decl)\n in zip(data, desc)\n )\n\n def _adapt_pyval(self, pyval: Any) -> SQLite:\n \"\"\"\n Adapt a Python value to a native SQLite type\n\n @param pyval Python value\n @return Native SQLite value\n \"\"\"\n pytype = type(pyval)\n\n if pyval is None or pytype in [str, bytes, int, float]:\n # Pass through already native types\n return pyval\n\n try:\n # Try to adapt non-native types\n return self._adaptors[pytype](pyval)\n\n except KeyError:\n raise TypeError(f\"No adaptor for {pytype.__name__} type\")\n\n def 
_adapt_bindings(self, bindings: _PyBindings) -> _SQLiteBindings:\n \"\"\"\n Adapt bind variables to native SQLite types\n\n @param bindings Bind variables (Python types)\n @return Bind variables (SQLite types)\n \"\"\"\n if isinstance(bindings, Tuple):\n return tuple(map(self._adapt_pyval, bindings))\n\n elif isinstance(bindings, Dict):\n return {k: self._adapt_pyval(v) for k, v in bindings.items()}\n\n else:\n raise TypeError(\"Invalid bindings; should be a tuple or dictionary\")\n\n def execute(self, sql: str, bindings: Optional[_PyBindings]=None) -> \"Cursor\":\n \"\"\"\n Executes the SQL statements with the specified bindings\n\n @param sql SQL statements (string)\n @param bindings Bind variables\n @return Cursor to execution\n \"\"\"\n sqlite_bindings = self._adapt_bindings(bindings) if bindings else None\n return Cursor(self._cursor.execute(sql, sqlite_bindings))\n\n def executemany(self, sql: str, binding_seq: Sequence[_PyBindings]) -> \"Cursor\":\n \"\"\"\n Executes the SQL statements with a sequence of bindings\n\n @param sql SQL statements (string)\n @param binding_seq Sequence of bind variables\n @return Cursor to execution\n \"\"\"\n sqlite_binding_seq = [self._adapt_bindings(v) for v in binding_seq]\n return Cursor(self._cursor.executemany(sql, sqlite_binding_seq))\n\n def fetchone(self) -> Optional[Tuple]:\n \"\"\"\n Fetch the next row of data from the cursor\n\n @return Row of data (tuple; None on no more data)\n \"\"\"\n try:\n return next(self)\n\n except StopIteration:\n return None\n\n def fetchall(self) -> List[Tuple]:\n \"\"\"\n Fetch all the remaining rows of data from the cursor\n\n @return Rows of data (list)\n \"\"\"\n return list(self)\n\n\nclass Connection(apsw.Connection):\n \"\"\"\n Subtyped APSW connection that allows us to register adaptors and\n convertors and which returns a cursor that supports them, as well as\n a more convenient interface for registering aggregate UDFs\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\" Constructor \"\"\"\n super().__init__(*args, **kwargs)\n self._adaptors: _Adaptors = {}\n self._convertors: _Convertors = {}\n\n def cursor(self) -> Cursor:\n \"\"\"\n Create a new cursor on this connection\n\n @return Cursor\n \"\"\"\n return Cursor(super().cursor())\n\n def register_aggregate_function(self, name: str, udf: Type[AggregateUDF]) -> None:\n \"\"\"\n Register an aggregation function using an AggregateUDF\n implementation\n\n @param udf Aggregate function implementation (AggregateUDF)\n \"\"\"\n # The first parameter is self, so we cut that off\n param_kinds = list(map(lambda x: x.kind,\n list(OrderedDict(signature(udf.step).parameters).values())[1:]))\n\n if any(p in [Parameter.KEYWORD_ONLY, Parameter.VAR_KEYWORD] for p in param_kinds):\n raise TypeError(f\"Aggregate function {udf.__name__} has an invalid step signature\")\n\n num_args = -1 if any(p == Parameter.VAR_POSITIONAL for p in param_kinds) else len(param_kinds)\n\n assert len(name) < 255, f\"\\\"{name}\\\" name is too long for aggregate function\"\n self.createaggregatefunction(name, aggregate_udf_factory_factory(udf), num_args)\n\n def register_adaptor(self, t: Type, adaptor: Adaptor) -> None:\n \"\"\"\n Register a type adaptor\n\n @param t Type\n @param adaptor Adaptor function (callable)\n \"\"\"\n self._adaptors[t] = adaptor\n\n def register_convertor(self, decl: str, convertor: Convertor) -> None:\n \"\"\"\n Register a type convertor\n\n @param decl Declared type (string)\n @param convertor Convertor function (callable)\n \"\"\"\n 
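
The adaptor/convertor registries in this `Cursor`/`Connection` pair mirror a facility the standard library's sqlite3 module provides natively; a compact sketch of the same round-trip there (illustrative only — the code above targets APSW, not sqlite3):

import sqlite3
from datetime import datetime

# adaptor: Python value -> native SQLite storage value
sqlite3.register_adapter(datetime, lambda dt: dt.isoformat())
# convertor: bytes from a column declared TIMESTAMP -> Python value
sqlite3.register_converter("TIMESTAMP", lambda b: datetime.fromisoformat(b.decode()))

conn = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
conn.execute("CREATE TABLE t (ts TIMESTAMP)")
conn.execute("INSERT INTO t VALUES (?)", (datetime.now(),))
print(conn.execute("SELECT ts FROM t").fetchone()[0])  # comes back as datetime
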
self._convertors[decl] = convertor\n","repo_name":"wtsi-hgi/irobot","sub_path":"irobot/precache/db/_dbi.py","file_name":"_dbi.py","file_ext":"py","file_size_in_byte":6187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31860776257","text":"import sys\n\nstring1 = sys.stdin.readline().rstrip()\nstring2 = sys.stdin.readline().rstrip()\n\ndp = [[0 for _ in range(len(string1)+1)] for _ in range(len(string2)+1)]\n\nfor i in range(1,len(string2)+1):\n for j in range(1,len(string1)+1):\n if string2[i-1] == string1[j-1]:\n dp[i][j] = dp[i-1][j-1] +1\n else:\n dp[i][j] = max(dp[i-1][j],dp[i][j-1])\n\nmax_length = dp[len(string2)][len(string1)]\n\n\nx = len(string2)\ny = len(string1)\n\nret = []\nwhile dp[x][y] != 0:\n if string1[y-1] == string2[x-1]:\n ret.append(string1[y-1])\n x -= 1\n y -= 1\n else:\n if dp[x-1][y] > dp[x][y-1]:\n x = x-1\n else:\n y = y-1\n\nprint(max_length)\nfor i in ret[::-1]:\n print(i,end='')","repo_name":"chanyoung1998/Algorithm-Study","sub_path":"백준 문제풀기/class5/algorithm_9252.py","file_name":"algorithm_9252.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22800433925","text":"# Problem Description\n# Write a program to input an integer N from user and print\n# hollow diamond star pattern series of N lines.\n#\n# See example for clarifications over the pattern.\n#\n# Problem Constraints\n# 1 <= N <= 1000\n#\n# Input Format: First line is an integer N\n#\n# Output Format: N lines containing only char '*' as per the question.\n#\n# Example Input\n# Input 1: 4\n# Input 2: 6\n#\n# Example Output\n# Output 1:-\n# ********\n# *** ***\n# ** **\n# * *\n# * *\n# ** **\n# *** ***\n# ********\n#\n# Output 2:-\n# ************\n# ***** *****\n# **** ****\n# *** ***\n# ** **\n# * *\n# * *\n# ** **\n# *** ***\n# **** ****\n# ***** *****\n# ************\n\ndef main():\n # YOUR CODE GOES HERE\n # Please take input and print output to standard input/output (stdin/stdout)\n # E.g. 'input()/raw_input()' for input & 'print' for output\n n = int(input())\n for i in range(n):\n space_req = i + i\n stars_req = (2 * n - space_req) // 2\n print(\"*\" * stars_req + \" \" * space_req + \"*\" * stars_req)\n\n for i in range(n):\n i = n - i - 1\n space_req = i + i\n stars_req = (2 * n - space_req) // 2\n print(\"*\" * stars_req + \" \" * space_req + \"*\" * stars_req)\n\n return 0\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Abhilash-du/daily-coding-challenges","sub_path":"DataStructures/Introduction/StarPatternI.py","file_name":"StarPatternI.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25230983431","text":"import random\n\nplayerwin = 0\ncompwin = 0\nprint(\"First to 3 wins! Good luck.\")\n\n\ndef win(user,computer):\n if (user == \"r\" and computer == \"s\") or (user == \"p\" and computer == \"r\") or (user == \"s\" and computer == \"p\"):\n return True\n\nwhile playerwin or compwin != 3:\n if playerwin == 3:\n print(f\"Game over! You win {playerwin} - {compwin}!\")\n quit()\n\n if compwin == 3:\n print(f\"Game over! I win {compwin} - {playerwin}!\")\n quit()\n \n print(f\"You: {playerwin}, me: {compwin}\")\n playerc = input(\"What do you choose? Rock (r), paper (p) or scissors (s)?\")\n player = playerc.lower()\n comp = random.choice([\"r\",\"p\",\"s\"])\n \n if player == comp:\n print(f\"It's a tie! 
We both picked {comp}\")\n\n elif win(player,comp):\n playerwin = playerwin +1\n print(f\"Congrats! You won. I picked {comp}\")\n else:\n compwin = compwin +1\n print(f\"Sorry! You lost. I picked {comp}\")","repo_name":"jtucker85/Rock-Paper-Scissors","sub_path":"rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14825665309","text":"import operator\nfrom functools import partial\nfrom typing import Any, Callable, Dict\n\nfrom sympy import Expr\n\nimport torch\nfrom torch.fx.experimental.symbolic_shapes import free_symbols\nfrom torch.utils._sympy.value_ranges import bound_sympy, ValueRangeAnalysis, ValueRanges\nfrom .ir import InterpreterShim, LoopBody, LoopBodyBlock\nfrom .utils import cache_on_self, dominated_nodes\nfrom .virtualized import V\n\n\nclass BoundVars:\n \"\"\"\n Performs Value Range Analysis on LoopBody's fx graph by calling BoundVars.run()\n It exposes the ranges of the nodes in the `bounds` variable\n\n Note. A current limitation of this analysis is that it just works on a per-loop basis.\n We should be able to propagate the bounds between across the whole graph. This may benefit\n the case a bounded variable is returned by a kernel and fed into another.\n \"\"\"\n\n def __init__(self, loop_body: LoopBody) -> None:\n self.loop_body = loop_body\n self.replacement_vals = {\n k: ValueRanges(0, v - 1) if not free_symbols(v) else bound_sympy(v)\n for k, v in loop_body.var_ranges.items()\n }\n # avoid computing these values, pessimistically assume that they are unbounded\n self.unbounded_vars = dominated_nodes(\n node\n for node in self.loop_body.get_nodes()\n if node.target in [\"load\", \"reduction\", operator.getitem]\n or \"masked_subblock\" in node.target\n )\n # To access this variable call `get_bounds()`\n self._bounds: Dict[torch.fx.Node, ValueRanges] = {}\n\n @cache_on_self\n def get_bounds(self) -> Dict[torch.fx.Node, ValueRanges]:\n submodules = self.swap_submodules(self.loop_body.submodules)\n\n # Initialize the environment with the unbounded variables\n for node in self.unbounded_vars:\n # we need to evaluate masked_subblock to recurse, and we need to set indirect values\n if not isinstance(node.target, str) or (\n \"masked_subblock\" not in node.target\n and \"set_indirect\" not in node.target\n ):\n self._bounds[node] = ValueRanges.unknown()\n\n with V.set_ops_handler(ValueRangeAnalysis()):\n interpreter = InterpreterShim(self.loop_body.root_block.graph, submodules)\n interpreter.run(V.get_ops_handler(), initial_env=self._bounds)\n return self._bounds\n\n def swap_submodules(\n self, submodules: Dict[str, Callable[..., Any]]\n ) -> Dict[str, Callable[..., ValueRanges]]:\n result: Dict[str, Callable[..., ValueRanges]] = {}\n for key in submodules.keys():\n if key == \"get_index\":\n result[key] = self.get_index\n elif \"masked_subblock\" in key:\n subblock = self.loop_body.subblocks[key]\n # The result within the lambda will reference to the final\n # set of modules at the end of the for-loop as it stores a reference to it\n\n # bind subblock in a function because python lambdas close over by reference\n # moving the lambda out of make_fn would close over the reference to subblock,\n # so all lambdas would have the same subblock reference that is the final\n # subblock in the loop\n def make_fn(subblock):\n return lambda mask, value: self.masked_subblock(\n subblock, self._bounds, mask, value, result\n )\n\n result[key] = 
make_fn(subblock)\n\n else:\n assert \"set_indirect\" in key\n idx = int(key[len(\"set_indirect\") :])\n var = self.loop_body.indirect_vars[idx]\n indirect = partial(self.set_indirect, var)\n result[key] = indirect\n\n return result\n\n def masked_subblock(\n self,\n subblock: LoopBodyBlock,\n env: Dict[torch.fx.Node, ValueRanges],\n mask: Any,\n value: Any,\n submodules: Dict[str, Callable[..., Any]],\n ) -> ValueRanges:\n interp = InterpreterShim(subblock.graph, submodules)\n interp.run(V.get_ops_handler(), initial_env=env)\n output = [node for node in subblock.graph.nodes if node.target == \"output\"]\n assert len(output) == 1\n # dont bother unioning with value since the load from buffer will be\n # pessimistically assumed to be inf anyway\n return interp.env[output[0]]\n\n def set_indirect(self, old: Expr, new: ValueRanges) -> ValueRanges:\n assert isinstance(new, ValueRanges)\n self.replacement_vals[old] = new\n return new\n\n def get_index(self, name: Expr) -> ValueRanges:\n expr = self.loop_body.indexing_exprs[name]\n bound = self.replacement_vals.get(expr)\n if bound is None:\n bound = bound_sympy(expr, self.replacement_vals)\n # The following assertion is true at the time of this writing\n # We don't assert is as to not execute bound_sympy when bound is not None\n # assert bound is None or bound == bound_sympy(expr, self.replacement_vals)\n self.replacement_vals[name] = bound\n return bound\n","repo_name":"pytorch/pytorch","sub_path":"torch/_inductor/bounds.py","file_name":"bounds.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"3261040577","text":"from common.browser import Browser\nfrom config.config import *\nimport unittest\nimport time\n\nclass Yishi_firmware_library(Browser):\n _testMethodDoc = '固件库管理'\n\n # @unittest.skip\n def test_lib_upload_01(self):\n '''跳转固件库页面'''\n super().firmware_lib_page()\n try:\n self.assertIn('添加固件', self.driver.page_source)\n except Exception as e:\n print(self.test_lib_upload_01.__doc__ + '运行失败', e)\n self.driver.get_screenshot_as_file(image_dir + self.test_lib_upload_01.__doc__ + '.png')\n\n def test_firm_upload_02(self):\n super().firmware_lib_page()\n super().firm_upload_click()\n time.sleep(3)\n super().file_upload_click(r'C:\\Users\\anban\\Desktop\\gujianhuizong\\jizhunceshi\\libcurl_7.17.1-1_arm.ipk')\n time.sleep(5)\n # try:\n # self.assertIn('添加任务', self.driver.page_source)\n # except Exception as e:\n # print(self.test_lib_upload_01.__doc__ + '运行失败', e)\n # self.driver.get_screenshot_as_file(image_dir + self.test_lib_upload_01.__doc__ + '.png')\n\nif __name__ == '__main__':\n Yishi_firmware_library()","repo_name":"jkpark1206/UI_firmware_upload","sub_path":"testcases/yishi_bFirmware library.py","file_name":"yishi_bFirmware library.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14800909242","text":"\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport os\nfrom streamlit_d3graph import d3graph\nfrom datetime import datetime\n\n\ndef app():\n # d3 = d3graph()\n # Load karate example\n # adjmat, df = d3.import_example('karate')\n # Process the adjacency matrix\n # d3.graph(adjmat)\n\n # Set node properties\n # hover = '\\nId: ' + adjmat.columns.astype(str) +'\\nDegree: ' + df['degree'].astype(str) + '\\nLabel: ' + df['label'].values\n # hover = hover.values\n # label = df['label'].values\n\n # # Set 
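
The `make_fn` factory in `swap_submodules` above exists because Python closures capture variables by reference, so lambdas created in a loop would all see the loop's final value. A standalone demonstration of the pitfall and the factory fix:

# pitfall: all three lambdas share the same 'i', which ends at 2
late = [lambda: i for i in range(3)]
print([f() for f in late])   # [2, 2, 2]

# fix: a factory binds the current value into each closure
def make_fn(j):
    return lambda: j

bound = [make_fn(i) for i in range(3)]
print([f() for f in bound])  # [0, 1, 2]
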
node properties\n # d3.set_node_properties(label=label, hover=hover, color=label)\n\n # Plot\n # d3.show()\n\n data=pd.read_csv(os.path.join(r'C:\\Users\\yosty\\Desktop\\Desktop_Folder\\14 - git\\IMF-DOTS-Project\\00-data\\dots\\clean\\dotsTimeSeries.csv'))\n\n data['period']=pd.to_datetime(data['period'])\n\n st.sidebar.text(\"Always first day of the month\")\n period=st.sidebar.date_input(\n label='Period',\n value=data['period'].min())\n\n datafilter=data[data['period']==period]\n\n datafilter=datafilter.pivot_table(index='ReferenceArea', columns='CounterpartReferenceArea', values='value')\n\n datafilter=datafilter.replace(np.nan, 0)\n datafilter=datafilter[datafilter.columns[datafilter.columns.isin(datafilter.index)]]\n\n datafilter.index=datafilter.columns\n\n\n # testdf=pd.DataFrame({\n # 'one':[1 for x in range(4)],\n # 'two':[1 for x in range(4)],\n # 'three':[1 for x in range(4)],\n # 'four':[1 for x in range(4)]})\n\n # l=300\n # keepL=[]\n # for i in range(l):\n # keepL.append(pd.DataFrame({str(i):[1 for x in range(l)]}))\n\n\n # testdf=pd.concat(keepL, axis=1)\n\n # testdf.index=testdf.columns\n\n d3 = d3graph()\n d3.graph(datafilter)\n\n d3.show()\n\n\n\n\n\n\n","repo_name":"rcyost/Trade-Dashboard","sub_path":"pages/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6213778081","text":"import docx\nfrom tqdm import tqdm\n\npbar = tqdm(total=150782)\ndoc = docx.Document()\ndoc2 = docx.Document()\ncount = 0\nwith open('./dataset/train.raw', 'r') as r:\n while True:\n line = r.readline()\n if not line:\n break\n q = line.rstrip('\\n')\n if count <= 75392:\n doc.add_paragraph(q)\n else:\n doc2.add_paragraph(q)\n a = r.readline().rstrip('\\n')\n if count <= 75392:\n doc.add_paragraph(a)\n else:\n doc2.add_paragraph(a)\n pbar.update(2)\n count += 2\ndoc.save('./dataset/train_en_part1.docx')\ndoc2.save('./dataset/train_en_part2.docx')\npbar.close()","repo_name":"InoriJam/Opinion-Polarity-Classification-with-RoBERTa-Large","sub_path":"raw2docx.py","file_name":"raw2docx.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"25590536894","text":"valid_tags = set([\n 'Acid',\n 'Air',\n 'Attune (ritual)',\n 'Attune (ritual; see text)',\n 'Attune (self)',\n 'Attune (target)',\n 'Auditory',\n 'Cold',\n 'Compulsion',\n 'Creation',\n 'Curse',\n 'Emotion',\n 'Detection',\n 'Earth',\n 'Electricity',\n 'Fire',\n 'Flesh',\n 'Life',\n 'Light',\n 'Manifestation',\n 'Mind',\n 'Mystic',\n 'Physical',\n 'Planar',\n 'Poison',\n 'Scrying',\n '(see text)',\n 'Sensation',\n 'Shaping',\n 'Shielding',\n 'Sizing',\n 'Speech',\n 'Subtle',\n 'Sustain (standard)',\n 'Sustain (minor)',\n 'Swift',\n 'Teleportation',\n 'Temporal',\n 'Trap',\n 'Visual',\n 'Water',\n])\n\ndef glosstermify(tag):\n if tag == '(see text)':\n return tag\n elif ' ' in tag:\n split_tag = tag.split()\n return f\"\\\\glossterm<{split_tag[0]}> {' '.join(split_tag[1:])}\"\n else:\n return f\"\\\\glossterm<{tag}>\"\n\ndef is_valid_tag(tag):\n return tag in valid_tags\n","repo_name":"grantrobertsmith/Rise","sub_path":"python/rise/latex/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"26508160369","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 3 15:20:16 2022\r\n\r\n@author: 
jesus\r\n\"\"\"\r\nimport numpy as np\r\n\r\n\r\ndef plateau(t_ini=0.0, t_rise=0.1, t_dec=0.1, t_fin=0.5, N=10000):\r\n\r\n t = np.linspace(t_ini, t_fin, int(N))\r\n f = np.zeros(int(N))\r\n\r\n for i, s in enumerate(t):\r\n\r\n if t[i] < (t_ini + t_rise):\r\n f[i] = t[i]/t_rise\r\n if t[i] >= (t_ini + t_rise):\r\n if t[i] < (t_fin - t_dec):\r\n f[i] = 1\r\n if t[i] >= (t_fin - t_dec):\r\n f[i] = t_fin/t_dec-t[i]/t_dec\r\n\r\n return f, t\r\n\r\n\r\ndef plateau_exp(t_ini=0.0, t_fin=0.5, C=40, N_t=10000):\r\n\r\n t_mid = (t_fin-t_ini)/2\r\n\r\n t_dens = np.linspace(t_ini, t_fin, int(N_t))\r\n\r\n dens_ev = np.subtract(1, np.exp(-C*(t_dens-t_ini)))*(t_dens <= t_mid) \\\r\n + np.subtract(1, np.exp(C*(t_dens-t_fin)))*(t_dens > t_mid)\r\n\r\n return dens_ev, t_dens\r\n","repo_name":"Jesus-Salas-Suarez-Barcena/SINTE","sub_path":"Plateau.py","file_name":"Plateau.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72750540393","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'graficas'\n\nurlpatterns = [\n\turl(r'^$', views.ReportesView.as_view(), name=\"index\"),\n\turl(r'^reporte/$', views.ReporteView.as_view(), name='reporte'),\n]","repo_name":"FalconJ/ColdRoom","sub_path":"graficas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37779701620","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport sys\nimport tensorflow.contrib.eager as tfe\n\nimport time, timeit\n\n# input[0] (E,G,C,M) (10, 1, 8, 16)\nE = 20\nG = 1\nC = 512\nM = 1024\n\nbatch = E * C\nlen1 = 1\nlen2 = 1\nlen3 = M\n\n# input[1] (E,M,H) (10,16,18)\nH = 9984\n\ncustom_module_lib_file = \"../build/src/tf_op/libcustomop.so\"\ncustom_module = tf.load_op_library(custom_module_lib_file)\n\nnp_data = np.random.choice([0,1,0,0,0,0], batch) # (batch,)\nnp_data = np_data.astype(np.float32)\n\nnp_data = np.expand_dims(np_data, axis=1) # (batch, 1)\nnp_data = np.tile(np_data, (1, len1*len2*len3))\nnp_data = np.reshape(np_data, (batch, len1, len2, len3))\n\ntf_data = tf.constant(np_data, dtype=tf.float32)\n\nnp_data1 = np.reshape(np_data, (E,G,C,M))\ntf_data1 = tf.constant(np_data1, dtype=tf.float32)\nnp_data2 = np.full((E,M,H), 2.0)\ntf_data2 = tf.constant(np_data2, dtype=tf.float32)\nres_orig = tf.einsum('EGCM, EMH->EGCH', tf_data1, tf_data2)\n\ntbc1 = timeit.default_timer()\noutput_indices = custom_module.non_zero_index_grad_gpu(tf_data)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n np_indices=sess.run(output_indices)\n #print(np_indices)\n #tf_data1 = sess.run(tf_data1)\n #tf_data2 = sess.run(tf_data2)\n tic0 = timeit.default_timer()\n data2 = sess.run(res_orig)\n toc0 = timeit.default_timer()\n print(\"time for the ref TF: %.4f ms\"%((toc0-tic0)*1000))\n exit()\n #print(data2.shape)\n #for i in range(data2.shape[0]):\n #print(data2[i])\n\ndef check_result(input_data, indices):\n # check the result\n for i in range(batch):\n a = input_data[i, 0, 0, 0]\n if (a > 0):\n if (i != indices[i] - 1):\n print(a)\n print(i)\n print(indices[i])\n return False\n else:\n if (indices[i] != 0):\n print(a)\n print(i)\n print(indices[i])\n return False\n return True\n\nif check_result(np_data, np_indices):\n print(\"Test Passed!\")\nelse:\n print(\"Test Failed!\")\n\ne_group = np.split(np_indices, E, axis=0)\n\n# Get max valid num in C axis \ndef 
max_valid_num(group):\n # output_indices shape is (E, G, C)\n #output_indices = np.reshape(output_indices, [-1])\n #print(e_group)\n max_zero_counter = 0\n non_zero = np.count_nonzero(group, axis=1)\n #print(\"non_zero: \", non_zero)\n max_num_ = np.max(non_zero)\n print(max_num_)\n return max_num_\n\nmax_num = max_valid_num(e_group)\n\n# Get non zero position in (E,G,C)\ndef nonzero_position(group):\n group = np.array(group)\n group = np.reshape(group, (E,G,C))\n position_ = np.argwhere(group > 0)\n # position shape is (x, 3), for example [9, 0, 6]. It's index of valid line\n #print(position_)\n print(\"position shape \", position_.shape)\n return position_\n\nposition = nonzero_position(e_group)\n\n# reshape to (-1,1,1,3)\nposition4 = np.reshape(position, (-1, 1, 1, 3))\n#print(position4)\n\nnp_data = np.reshape(np_data, (E, G, C, M))\n\ntf_data = tf.convert_to_tensor(np_data)\n\n# position_test = np.array([\n# [[[0,0,0],[0,0,1]]],\n# [[[1,0,2],[1,0,0]]]\n# ])\n\ntf_data_v = tf.gather_nd(tf_data, position4)\nwith tf.Session() as sess:\n #print(\"tf_data:\")\n #print(sess.run(tf_data))\n #print(\"tf_data_v:\")\n tf_data_v = sess.run(tf_data_v)\n #tf_data_v = tf_data_v.eval()\n #print(tf_data_v)\n print(\"tf_data_v shape: \", tf_data_v.shape)\n\n# tf_data_v shape (x, 1, 1, 32)\n# position shape (x, 3), for example [9, 0, 6]\nindex = 0\nrow0 = np.array([[[[0]*M]]])\n\nfor i in range(E):\n for j in range(max_num):\n if index < position.shape[0] and position[index][0] == i:\n #print(\"i \", i, \" j\", j, \" index \", index)\n index += 1\n else:\n #print(\"i \", i, \" j\", j)\n tf_data_v = np.insert(tf_data_v, i * max_num + j, row0, axis = 0)\n #print(\"#### row0 shape \", row0.shape)\n #print(\"### tf_data_v shape \", tf_data_v.shape)\nprint(\"### tf_data_v shape \", tf_data_v.shape, \" shape0 \", tf_data_v.shape[0])\n#print(tf_data_v)\n\n#for i in range(tf_data_v.shape[0]):\n# print(tf_data_v[i])\n\ndata_v = np.reshape(tf_data_v, (E, G, max_num, M))\n#print(\"data_v shape :\", data_v.shape)\n#print(data_v)\n\ntf_data1_v = tf.constant(data_v, dtype=tf.float32)\nres_v = tf.einsum('EGCM, EMH->EGCH', tf_data1_v, tf_data2)\n\n# Get valid data position from padding result res_v\nprint(position.shape)\nindex_0 = 0\npos = 0\nposition_v = np.arange(position.shape[0])\nfor i in range(E * max_num):\n if (pos >= position.shape[0]):\n break\n if position[pos][0] == index_0:\n position_v[pos] = i\n pos += 1\n if (i + 1) % max_num == 0:\n index_0 += 1\n#print(position_v)\n#position_v = np.reshape(position_v, (1, position.shape[0]))\n\nposition_v_pad = np.arange(position.shape[0])\nfor i in range(position.shape[0]):\n position_v_pad[i] = position[i][0] * C + position[i][2]\n\nres_v = tf.reshape(res_v, [E*G*max_num, 1, 1, H])\ntic1 = timeit.default_timer()\n## Begin to valid einsum\nwith tf.Session() as sess:\n res_v = sess.run(res_v)\n #print(res_v)\n res_v = res_v[position_v]\n print(\"rev_v shape \", res_v.shape)\n #print(res_v)\n #print(position_v)\n v_indices = tf.constant(position_v_pad, dtype=tf.int32)\n v_indices = tf.reshape(v_indices, [position_v_pad.shape[0], 1])\n v_shape = tf.constant([E*G*C, 1, 1, H])\n res_v_pad = tf.scatter_nd(v_indices, res_v, v_shape)\n res_v_pad = tf.reshape(res_v_pad, (E,G,C,H))\n res_v_pad = sess.run(res_v_pad)\n #print(res_v_pad.shape)\n\ntoc1 = timeit.default_timer()\nprint(\"time for the ref TF: %.4f ms\"%((toc1-tic1)*1000))\nprint(\"time for the ref TF: %.4f ms\"%((toc1-tbc1)*1000))\n \n#for i in range(res_v_pad.shape[0]):\n 
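
The gather/compute/scatter sequence this test builds — pack only the non-zero rows, run the contraction on the packed block, then scatter the results back into a zero-padded tensor — reduces to a few lines in plain NumPy (a sketch of the idea, not of the TF graph above):

import numpy as np

X = np.array([[1., 1.], [0., 0.], [2., 2.]])  # rows 0 and 2 are valid
W = np.ones((2, 3))

valid = np.flatnonzero(X.any(axis=1))   # indices of non-zero rows
packed = X[valid] @ W                   # compute only on the valid rows
out = np.zeros((X.shape[0], W.shape[1]))
out[valid] = packed                     # scatter results back in place
print(out)
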
#print(res_v_pad[i])","repo_name":"cding-nv/study","sub_path":"cuda/einsum/sample/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10782057304","text":"import time\n\nstart = time.perf_counter()\n\n\ndef do_something():\n print('Sleeping 1 second')\n time.sleep(1)\n print('Done Sleeping...')\n\n\nfor _ in range(10):\n do_something()\n\nfinish = time.perf_counter()\n\n# Run Code Concurrently Using the Threading Module\nprint(f'Finished in {round(finish-start, 2)} second(s)')\n# Result : Finished in 10.07 second(s)\n","repo_name":"sandeepyadav10011995/Data-Structures","sub_path":"Threading/threading-demo-1.1.py","file_name":"threading-demo-1.1.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12083885496","text":"#!/usr/bin/env python3\n\n\"\"\"\nsplit_qr_exam\n=============\n\nThis script splits a pdf composed of scanned exams. The input is a pdf file containing multiple\nscanned exames. On each exam cover there should be a QR code identifying the student writing this\nexam.\nFor each part of the exam a folder will be created and for each student a pdf will be created in\nthat folder to allow for parallel evaluation. To ensure pseudonymity the files are named with the\nsha256 hash of the student id.\n\nFor this to work, you have to specify all the parts of the exam with the corresponding amount of\npages.\n\"\"\"\n\nimport sys\nimport os\nimport logging\nimport math\nimport argparse\nimport hashlib\nimport uuid\n\nimport numpy as np\n\nimport fitz\nimport cv2\n\n#def show_img(img):\n# cv2.imshow(\"image\", img)\n# cv2.waitKey(3000)\n# cv2.destroyAllWindows()\n\nclass NoQRCodeFoundException(Exception):\n \"\"\"Exception thrown if no QR code is found on a page.\n\n Should only be thrown, if multiple attempts at multiple thresholds were attempted.\"\"\"\n\nclass NoCoverFoundException(Exception):\n \"\"\"Exception thrown if no page with a QR code is found.\n\n Should only be thrown, if all pages of an exam are tested.\"\"\"\n\nclass QRScanner:\n \"\"\"Class that handles the scanning of QR codes\n\n Intended use is creating an instance of the class once, and using it to process all neccessary\n images.\n\n Attributes:\n threshold: A value between 0 and 255, indicating what amount of grey will be interpreted\n as black or white. Higher values lead to more black, lower values lead to more\n white. No threshold will be applied, if the value is None.\n shrink: Shinks the image to this width in the detection phase.\n crop: A tuple of fraction coordinates, representing the area to crop the image to before\n scanning for QR codes. 
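
The timing demo above calls `do_something()` ten times back to back, so it finishes in about ten seconds despite the closing comment about running concurrently. The threaded variant that comment alludes to could look like this (a sketch using the standard `threading` module):

import threading
import time

def do_something():
    print('Sleeping 1 second')
    time.sleep(1)
    print('Done Sleeping...')

start = time.perf_counter()
threads = [threading.Thread(target=do_something) for _ in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()  # all ten sleeps overlap, so total time is roughly 1 s
print(f'Finished in {round(time.perf_counter() - start, 2)} second(s)')
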
The first element represents the upper left corner of the area\n the second element represents the lower right corner.\n E.g.: ((0,0),(0.5,0.5)) would take the top left quartal of the image,\n ((0,0.5),(0,1)) would take the left half of the image,\n ((0.25,0.25), (0.75, 0.75)) would take the center of the image.\n If any value is greater than 1 or less than 0, it will be set to 1 or 0 respectively.\n\n Example use:\n qr_scanner = QRScanner(127, 600)\n try:\n qr_string = qr_scanner.get_qr_string(image)\n print(qr_string)\n except NoQRCodeFoundException:\n print(\"Could not find any QR codes :(\")\n \"\"\"\n\n def __init__(self, threshold, shrink, crop):\n \"\"\"Initializes all attributes of the object\"\"\"\n\n self.threshold = threshold\n if self.threshold is not None and self.threshold > 255:\n logging.warning(\"Threshold above 255, setting to 255\")\n self.threshold = 255\n elif self.threshold is not None and self.threshold < 0:\n logging.warning(\"Threshold below 0, setting to 0\")\n self.threshold = 0\n self.shrink = shrink\n self._qr_instance = cv2.QRCodeDetector()\n if crop is not None:\n if crop[0][0] > 1:\n crop[0][0] = 1\n if crop[0][0] < 0:\n crop[0][0] = 0\n if crop[0][1] > 1:\n crop[0][1] = 1\n if crop[0][1] < 0:\n crop[0][1] = 0\n if crop[1][0] > 1:\n crop[1][0] = 1\n if crop[1][0] < 0:\n crop[1][0] = 0\n if crop[1][1] > 1:\n crop[1][1] = 1\n if crop[1][1] < 0:\n crop[1][1] = 0\n self.crop = crop\n\n def get_qr_string(self, image):\n \"\"\"Decodes a QR code on page `pagenr` and returns the string.\n\n Args:\n image: The image to extract the string from.\n\n The image is first cropped, shrunk down and cleaned up, to improve performance and accuracy.\n If no code can be found after using these two operations, a second attempt is made for each\n threshold between 0 and 255, but not shrinking down the image.\n\n Returns:\n The string encoded in the QR code in the image\n\n Raises:\n NoQRCodeFoundException: If no QR code can be found\n \"\"\"\n\n cropped_image = self.crop_image(image)\n qr_coordinates = self.detect_qr_code(cropped_image)\n if qr_coordinates is None:\n qr_coordinates = self.detect_qr_code_slow(cropped_image)\n if qr_coordinates is None:\n raise NoQRCodeFoundException()\n return self.decode_qr_code(cropped_image, qr_coordinates)\n\n def crop_image(self, image):\n \"\"\"Croppes the image\n\n This operation does not create a new image. It returns a slice view of the original image.\n Relevant attributes for this method are: self.crop\n\n Args:\n image: The image to slice\n\n Returns:\n A cropped portion of the image. Or the original image, if self.crop is None.\n \"\"\"\n\n if self.crop is None:\n return image\n width, height, _ = image.shape\n x_start, new_width = int(self.crop[0][0] * width), int(self.crop[1][0] * width)\n y_start, new_height = int(self.crop[0][1] * height), int(self.crop[1][1] * height)\n return image[x_start:new_width, y_start:new_height]\n\n def shrink_image(self, image):\n \"\"\"Resizes an image to a width of `self.shrink`\n\n This function creates a new, resized image with width `self.shrink`. 
This operation\n preserves the aspect ratio of the input image.\n\n Args:\n image: The image to shrink\n\n Returns:\n A tuple consisting of\n * The input image shrunk to the width of `self.shrink`\n * The floating point scaling factor used to shrink the image\n \"\"\"\n\n factor = image.shape[0] / self.shrink\n returns = cv2.resize(image, None, fx=(1/factor), fy=(1/factor)), factor\n return returns\n\n\n def clean_up(self, image, overwrite_threshold=None):\n \"\"\"Cleans up a greyscale image by removing the grey parts\n\n The higher the threshold, the more grey is interpreted as black. The lower the threshold,\n the more grey is interpreted as white.\n\n Args:\n image: The image to clean up\n overwrite_threshold: If set, use this value instead of `self.threshold`.\n\n Returns:\n A new monochrome image.\n \"\"\"\n\n threshold = self.threshold if overwrite_threshold is None else overwrite_threshold\n\n return cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)[1]\n\n def detect_qr_code(self, image, no_shrink=False, overwrite_threshold=None):\n \"\"\"Finds the location of a QR code in an image using an optimized method.\n\n First a shrinking operation is performed. Since the mere detection of a QR code does not\n need all the details of the image, this operation does not impact the accuracy of the\n detection severly, but helps greatly to improve the performance.\n Then a clean up operation is performed. This converts the greyscale input image to a black\n and white image, thereby cleaning up the QR code and improve the accuracy of the scan.\n\n Args:\n image: The image to process\n no_shrink: If True, skips the shrinking operation\n overwrite_threshold: If set, uses this value instead of `self.threshold` in the\n clean_up phase\n\n Returns:\n A numpy float array of coordinates if a QR code is found, otherwise it returns None.\n If more than one QR code is found... that would be undefined behaviour.\n \"\"\"\n\n mini_image, factor = self.shrink_image(image) \\\n if self.shrink is not None and not no_shrink else (image, 1)\n mini_cleaned_up = self.clean_up(\n mini_image, overwrite_threshold=overwrite_threshold\n ) if self.threshold is not None else image\n found, coordinates = self._qr_instance.detect(mini_cleaned_up)\n if not found:\n return None\n return coordinates * factor\n\n def detect_qr_code_slow(self, image):\n \"\"\"Finds a QR code in in an image\n\n This should only be called, if a call to `detect_qr_code` was unsuccessful.\n\n Instead of shrinking and cleaning the image up, this method only tries to clean the image\n up, thereby maximizing the accuracy. 
Also, instead of only using a fixed threshold, all\n        possible thresholds, starting with `self.threshold`, will be tried, until one run yields a\n        result.\n\n        Args:\n            image: The image to detect a QR code in\n\n\n        Returns:\n            A numpy float array of coordinates if a QR code is found, otherwise it returns None.\n        \"\"\"\n\n        thresh = self.threshold\n        coordinates = None\n        while coordinates is None:\n            coordinates = self.detect_qr_code(image, no_shrink=True, overwrite_threshold=thresh)\n            thresh += 1\n            if thresh == 256:\n                thresh = 0\n            if thresh == (self.threshold - 1) % 256:\n                return None\n        return coordinates\n\n\n    def decode_qr_code(self, image, coordinates):\n        \"\"\"Given an image and the coordinates of a QR code, returns the string of the QR code.\n\n        Args:\n            image: An image with a QR code\n            coordinates: Coordinates found with `self.detect_qr_code()` or\n                         `self.detect_qr_code_slow()`\n\n        Returns:\n            The string encoded in the QR code, or an empty string, if no string could be decoded.\n        \"\"\"\n\n        return self._qr_instance.decode(image, coordinates)[0]\n\n\n\nclass QRKlausurProcessor:\n    \"\"\"Processes one single pdf file containing all the scanned exams.\n\n    For each student, a folder is created containing a pdf file for each\n    part defined. A student is identified by a QR code in the top left\n    part of the cover sheet.\n\n    If a student cannot be identified, the exam will be placed in a separate folder\n    and a warning will be issued. This exam needs to be processed manually.\n\n    Some errors that can occur in the scanning process will be fixed, however. If\n    an exam has fewer pages than defined, because the scanner skipped a page for some reason,\n    the rest of the exams won't be aligned to the total number of pages per exam. In such a case\n    an offset is calculated going forward and the problematic exam will be put in a separate folder\n    for manual processing.\n\n    Attributes:\n        pdf_file: A pymupdf-object, containing all the exams in sequence\n        destpath: The output folder. If it does not exist, it will be created.\n        parts: A list of tuples describing the parts of the exam together\n               with the first and last page of the part. E.g.\n               [(\"Cover\", 0, 2), (\"Task 1\", 2, 4), (\"Bonuspage_1\", 4, 6), (\"Bonuspage_2\", 6, 8)]\n        qr_scanner: An instance of `QRScanner`\n    \"\"\"\n\n    def __init__(self, filename, destpath, parts, hashing, qr_scanner):\n        \"\"\"Initializes the object.\n\n        Opens the file `filename` as a pymupdf-object. If the number of pages in the input document\n        is not divisible by the number of pages per exam (given by the `parts` list), a warning is\n        issued. 
Manual intervention may be needed.\n\n        Args:\n            filename: The name of the file to open\n            destpath: see corresponding Attribute\n            parts: see corresponding Attribute\n            hashing: If True, output files are named with the sha256 hash of the student id\n            qr_scanner: see corresponding Attribute\n\n        Raises:\n            RuntimeError: If filename does not exist or is not readable\n        \"\"\"\n        self.qr_scanner = qr_scanner\n        self.pdf_file = fitz.open(filename)\n        self.destpath = destpath\n        self.hashing = hashing\n        self._offset = 0\n        self._old_string = None\n\n        self.parts = parts\n        self._nrpages = self.parts[-1][2] + 1\n        if self.pdf_file.pageCount % self._nrpages:\n            logging.warning(\n                \"Number of pages of the scan is not divisible by the number of pages of the exam\"\n            )\n\n\n    def process_exams(self):\n        \"\"\"Processes all exams in order\"\"\"\n\n        for examnr in range(math.ceil(self.pdf_file.pageCount / self._nrpages)):\n            try:\n                while not self.process_exam(examnr):\n                    pass\n            except NoCoverFoundException:\n                logging.error(\"Could not find cover for exam %d, giving up.\", examnr)\n\n\n    def process_exam(self, examnr):\n        \"\"\"Processes a single exam.\n\n        Its QR code is decoded and all parts will be split. The resulting parts are stored in\n        folders for each part, named after the sha256sum of the student id to ensure pseudonymity.\n\n        If the scanned exam includes the backsides, all parts (except the cover) will also contain\n        the backside of the last page of the last part, meaning this page will occur in more than\n        one pdf.\n\n        If the last part consists of bonus pages, each of these pages will be a separate pdf file.\n\n        The page number of the cover page is computed by multiplying the `examnr` with the number\n        of pages per exam. If no QR code is found on the cover page, a warning is issued. All pages\n        up to and excluding the last cover page will be scanned for QR codes. If such a code is\n        found, it will be assumed that the previous exam has missing pages. 
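
In isolation, the per-part page copying described here comes down to a few PyMuPDF calls, using the same camelCase API the script itself uses (`exam.pdf` and the page range are hypothetical):

import fitz

src = fitz.open("exam.pdf")                   # hypothetical scanned input
part = fitz.open()                            # new, empty output document
part.insertPDF(src, from_page=0, to_page=1)   # copy pages 0..1 inclusive
part.save("cover.pdf")
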
Going forward the page\n numbers of the cover pages will be computed with an offset.\n\n Args:\n examnr: The running number of the exam\n\n Returns:\n True, if a code was Found on the first + self._offset page of the exam\n False, if no code was found\n\n Raises:\n NoCoverFoundException: If the offset matches the length of the exam, it is assumed,\n that there is no valid cover for this exam, and this exception\n is raised.\n \"\"\"\n\n first_page = examnr * self._nrpages + self._offset\n try:\n image = self.get_image_for_page(first_page)\n student_id = self.qr_scanner.get_qr_string(image)\n self._old_string = student_id\n print(f\"Exam number {examnr}, studentid: {student_id}\")\n if student_id == \"\":\n logging.warning(\"Empty QR String detected!\")\n student_id = uuid.uuid4()\n #student_path = os.path.join(self.destpath, student_id)\n for part_name, part_start, part_end in self.parts:\n part_folder = os.path.join(self.destpath, part_name)\n if self.hashing:\n hash_id = hashlib.sha256(bytes(student_id, encoding='utf8')).hexdigest()\n else:\n hash_id = student_id\n os.makedirs(part_folder, exist_ok=True)\n part_path = os.path.join(part_folder, f\"{hash_id}_{part_name}.pdf\")\n\n part_pdf = fitz.open()\n part_pdf.insertPDF(self.pdf_file\n , from_page=first_page + part_start\n , to_page=first_page + part_end)\n if os.path.exists(part_path):\n uuid_name = uuid.uuid4()\n logging.warning(f\"File {hash_id}_{part_name}.pdf already exists, saving as {hash_id}_{part_name}_{uuid_name}.pdf\")\n part_path = os.path.join(part_folder, f\"{hash_id}_{part_name}_{uuid_name}.pdf\")\n print(f\"{part_path} - {part_start}:{part_end}\")\n part_pdf.save(part_path)\n return True\n\n except NoQRCodeFoundException:\n if examnr == 0:\n logging.warning(\"No QR code found on page %d. Skipping pages, until we find a \"\n \"scannable cover page\", first_page)\n if self._offset == self._nrpages:\n self._offset = 0\n raise NoCoverFoundException()\n self._offset += 1\n else:\n logging.warning(\"No QR code found on page %d. This may be due to an inconsistent \"\n \"number of pages from a previous scan. 
Please check exam %d (%s) \"\n \"manually.\",\n first_page, (examnr - 1), self._old_string)\n if self._offset == -self._nrpages:\n self._offset = 0\n raise NoCoverFoundException()\n self._offset -= 1\n return False\n\n\n\n def get_image_for_page(self, pagenr):\n \"\"\"Renders the page `pagenr` and returns a numpy array containing the imagedata\"\"\"\n\n #images = self.pdf_file.getPageImageList(pagenr)\n #print(images)\n #if len(images) == 0:\n # raise RuntimeError(f\"Could not find an image on page {pagenr}\")\n #if len(images) > 1:\n # raise RuntimeError(f\"Multiple images on page {pagenr}\")\n #pix = fitz.Pixmap(self.pdf_file, images[0][0])\n pix = self.pdf_file[pagenr].getPixmap()\n image = np.frombuffer(pix.samples, np.uint8).reshape(pix.h, pix.w, pix.n)\n return np.ascontiguousarray(image[..., [2, 1, 0]]) # rgb to bgr\n\ndef create_parts(part_strings, is_double_sided, last_part_is_bonus):\n \"\"\"Takes a list of parts with lengths encoded in a string and creates a list of tupels with\n start and end page\"\"\"\n\n part_lengths = [(part[0], int(part[1])) for part in [part.split(\":\") for part in part_strings]]\n if last_part_is_bonus:\n bonus_parts = \\\n [(part_lengths[-1][0] + \"_\" + str(page), 1) for page in range(part_lengths[-1][1])]\n part_lengths = part_lengths[:-1] + bonus_parts\n parts = []\n part_start = 0\n for part_name, part_length in part_lengths:\n part_end = part_start + (part_length - 1)\n\n if is_double_sided:\n part_end += part_length\n if part_start != 0:\n part_start -= 1\n parts += [(part_name, part_start, part_end)]\n part_start = part_end + 1\n return parts\n\ndef parse_cropping(crop_string, no_crop):\n \"\"\"Parses a string formatted like \"x1,y1:x2,y2\" into the tuple ((x1,y1), (x2,y2))\n where x1, x2, y1, y2 are floating point numbers.\"\"\"\n\n if no_crop:\n return None\n coord_x, coord_y = crop_string.split(\":\")\n start_x, end_x = coord_x.split(\",\")\n start_y, end_y = coord_y.split(\",\")\n return ((float(start_x), float(end_x)), (float(start_y), float(end_y)))\n\ndef run(filename, destpath, parts, hashing, config):\n \"\"\"The main function. Given all neccessary parameters, processes every exam in the pdf\"\"\"\n\n qr_scanner = QRScanner(**config)\n qr_klausur_processor = QRKlausurProcessor(filename, destpath, parts, hashing, qr_scanner)\n qr_klausur_processor.process_exams()\n\ndef main():\n \"\"\"Parses the arguments and calls run\"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Split a scanned examfile into folders for each student and tasks in \"\n \"seperate pdf files. The input PDF should have a QR code identifying the \"\n \"student on the cover of each exam.\"\n , epilog=f\"Example of use:\\n\\t{sys.argv[0]} -p cover:1 -p task1:2 -p task2:3 \"\n \"-p task3:1 -p bonus:3 -b -d scanned_exams.pdf ~/splitexams/\\n\"\n , formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\"-t\", \"--threshold\", type=int, default=127,\n help=\"Set the threshold for the cleanup phase. Values range from 0 to \"\n \"255. Lower value means more grey is detected as white, higher \"\n \"value means more grey is detected as black. Only impacts the QR \"\n \"detection phase. Default: 127\")\n parser.add_argument(\"-s\", \"--shrink\", type=int, default=600,\n help=\"Width of the image in pixel for the QR code detection phase. \"\n \"Lower value means more performance, higher value means more \"\n \"accuracy. Only impacts the QR detection phase, does not affect \"\n \"the output PDF. 
Default: 600\")\n parser.add_argument(\"-c\", \"--crop\", default=\"0,0.5:0.5,1\",\n help=\"Fractional coordinates, that represent the area of the coverpage, \"\n \"where the QR code is located. The format is \\\"x1,y1:x2,y2\\\" where \"\n \"x1, x2, y1, y2 are values from 0 to 1. \"\n \"Default is \\\"0,0.5:0.5,1\\\" which corresponds to the top right \"\n \"quartal of the image.\")\n parser.add_argument(\"-T\", \"--no-cleanup\", action=\"store_true\", help=\"Skip cleanup phase\")\n parser.add_argument(\"-S\", \"--no-shrink\", action=\"store_true\", help=\"Skip shrinking phase\")\n parser.add_argument(\"-C\", \"--no-crop\", action=\"store_true\", help=\"Skip cropping phase\")\n parser.add_argument(\"-H\", \"--hash\", action=\"store_true\", help=\"Hash the resulting filename\")\n parser.add_argument(\"-p\", \"--part\", required=True, action=\"append\",\n help=\"Name and amount of pages for the parts of the exam, seperated by \"\n \"a colon. Can (and should) be issued multiple times (Example \\\"-p \"\n \"Cover:1 -p Task1:2\\\"). Should also include a part for the cover \"\n \"sheet, and empty bonus sheets. The number of pages should not \"\n \"include the backsides, if the document is scanned in duplex. Use \"\n \"--is-double-sided instead\")\n parser.add_argument(\"-d\", \"--is-double-sided\", action=\"store_true\", help=\"The exam is \"\n \"scanned in duplex. The split output pdf will also include the last \"\n \"backside of the previous part, since students tend to also write \"\n \"solutions there\")\n parser.add_argument(\"-b\", \"--last-part-is-bonus\", action=\"store_true\",\n help=\"The last part are bonus sheets. There will be a seperate pdf for \"\n \"each bonus sheet.\")\n parser.add_argument(\"pdf\", help=\"The pdf file to split\")\n parser.add_argument(\"dest\", default=\".\", help=\"Destination folder. 
Default is the current \"\n \"directory\", nargs='?')\n\n args = parser.parse_args()\n if args.no_shrink:\n args.shrink = None\n if args.no_cleanup:\n args.thresh = None\n\n args_parts = create_parts(args.part, args.is_double_sided, args.last_part_is_bonus)\n config = {\"shrink\": args.shrink,\n \"crop\": parse_cropping(args.crop, args.no_crop),\n \"threshold\": args.threshold}\n\n\n run(args.pdf, args.dest, args_parts, args.hash, config)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"christofsteel/split_qr_exam","sub_path":"split_qr_exam/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8283448116","text":"#!/usr/bin/env python\n\"\"\"Python script to display current weather conditions and next\nbus time on Papirus ePaper LCD display\n\"\"\"\n\nimport current_weather\nimport bus_times\nfrom papirus import Papirus\nimport bitmaps\nfrom PIL import Image\nimport numpy as np\nimport time\nimport datetime\nimport logging\n\nlogging.basicConfig(\n\tfilename='logfile.txt',\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n)\n\nlogging.info(\"-------------- display_weather.py --------------\")\nlogging.info(\"Displays current weather and next bus time to Papirus\")\nlogging.info(\"LCD display.\")\n\n# Connect to Papirus LCD display\ndisplay = Papirus()\n\nlogging.info(\"Display size: %s\", display.size)\nlogging.info(\"PIL.Image version: %s\", Image.VERSION)\nlogging.info(\"numpy version: %s\", np.__version__)\n\nbitmaps.display_size = display.size\n\n# City id for Vancouver, BC\ncity_id = 6173331\ntemp_units = u'\\xb0C' # deg Celcius symbol\nrefresh_rate = 5*60\n\nprevious_text = None\n\nwhile True:\n\n # First message is current date and time\n messages = [datetime.datetime.now().strftime(u\"%a %b %-d, %H:%M\")]\n\n # Get current weather conditions\n error_message, weather = current_weather.get_current_weather(city_id)\n\n if not error_message:\n temp = weather['main']['temp'] - 273.15\n description = weather['weather'][0]['description']\n messages.append(u\"{0:.0f}{1} {2}\".format(temp, temp_units, description))\n else:\n logging.debug(\"Error reading weather data from.\")\n messages.append(u\"WEATHER ERROR\")\n\n # Get next #13 bus departure time\n bus16_no, bus16_time = bus_times.get_next_bus_time(stop_number=51034)\n\n # Get next #33 bus westbound departure time\n bus33_no, bus33_time = bus_times.get_next_bus_time(stop_number=61128)\n messages.append(\"Buses: %s @ %s, %s @ %s\" % (bus16_no, bus16_time,\n bus33_no, bus33_time))\n\n text = u\"\\n\".join(messages)\n if text != previous_text:\n im, char_count = bitmaps.display_text_prop(text,\n char_set=\"unicode\",\n char_size=1)\n\n # Note: np.asarray(im) does not\n # currently work due to a bug in PIL\n\n im.putdata([p^255 for p in im.getdata()])\n\n display.display(im)\n display.update()\n logging.info(\"Display updated.\")\n\n previous_text = text\n\n time.sleep(refresh_rate)\n\n","repo_name":"billtubbs/papirus-zero","sub_path":"display_weather.py","file_name":"display_weather.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2562646460","text":"import os\nimport tensorflow as tf\nimport time\n\nfrom tensorflow.keras.layers import Dense, Flatten,Reshape\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers.legacy 
import Adam\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport tensorflow.keras.backend as K\n\ndef mpjpe(y_true, y_pred):\n # Compute the mean per-joint position error\n return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(y_true - y_pred), axis=-1)))\n\nnoOfFrames = 1 # should always be odd number\ngap = 1\nassert noOfFrames % 2 == 1\n\ntf.keras.mixed_precision.set_global_policy('mixed_float16')\n\ndef create_model(output_shape):\n input_shape=(noOfFrames, 17, 2)\n print(\"input_shape\", input_shape)\n print(\"output_shape\", output_shape)\n num_elm =output_shape[0]*output_shape[1]\n\n # Define the encoder\n encoder_input = tf.keras.layers.Input(shape=input_shape)\n x = tf.keras.layers.Flatten()(encoder_input)\n x = tf.keras.layers.Dense(512, activation='relu')(x)\n encoder_output = tf.keras.layers.Dense(32, activation='relu')(x)\n encoder = tf.keras.models.Model(encoder_input, encoder_output)\n\n # Define the decoder\n decoder_input = tf.keras.layers.Input(shape=(32,))\n x = tf.keras.layers.Dense(512, activation='relu')(decoder_input)\n x = tf.keras.layers.Dense(num_elm, activation='linear')(x)\n decoder_output = tf.keras.layers.Reshape(output_shape)(x)\n decoder = tf.keras.models.Model(decoder_input, decoder_output)\n\n # Define the full autoencoder model\n autoencoder_input = tf.keras.layers.Input(shape=input_shape)\n encoded = encoder(autoencoder_input)\n decoded = decoder(encoded)\n model = tf.keras.models.Model(autoencoder_input, decoded)\n\n model.compile(loss=mpjpe,optimizer=Adam())\n return model\n\nrelative_model = create_model((16, 3))\nroot_model = create_model((1, 3))\n\nrel_checkpoint_dir = os.path.dirname('rel-checkpoint/cp-{epoch:04d}.ckpt')\nrel_latest = tf.train.latest_checkpoint(rel_checkpoint_dir)\nrelative_model.load_weights(rel_latest)\n\nroot_checkpoint_dir = os.path.dirname('root-checkpoint/cp-{epoch:04d}.ckpt')\nroot_latest = tf.train.latest_checkpoint(root_checkpoint_dir)\nroot_model.load_weights(root_latest)\n\nfrom demo.vis import get_pose2D\nfrom common.camera import *\n\n# https://raw.githubusercontent.com/Vegetebird/StridedTransformer-Pose3D/main/demo/video/sample_video.mp4\n\n\nfrom flask import Flask, jsonify, request\nimport re\n\napp = Flask(__name__)\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n start = time.time()\n # Get the data from the POST request.\n data = request.get_json(force=True)\n # validate data\n if not data['url']:\n return jsonify({'error': 'url is required'}), 400\n print(\"url: \", data['url'])\n # check if valid url using regex, return error if not valid\n # if not re.match(r'^https?:\\/\\/.*\\.(?:mp4|avi|mov)$', data['url']):\n # return jsonify({'error': 'url is not valid'}), 400\n \n test_keypoints, test_width, test_height, test_fps, test_length = get_pose2D(data['url'])\n\n for cam_idx, kps in enumerate(test_keypoints):\n # Normalize camera frame\n kps[..., :2] = normalize_screen_coordinates(kps[..., :2], w=test_width, h=test_height)\n test_keypoints[cam_idx] = kps\n\n pos_2d = []\n framesUpToMiddle = noOfFrames // 2\n for i in range(test_keypoints[0].shape[0]):\n if (i-framesUpToMiddle*gap < 0 or i+framesUpToMiddle*gap >= test_keypoints[0].shape[0]):\n continue\n pos_2d.append(test_keypoints[0][i-framesUpToMiddle*gap:i+framesUpToMiddle*gap+1:gap])\n\n rel_prediction = relative_model.predict(np.array(pos_2d))\n root_prediction = root_model.predict(np.array(pos_2d))\n root_prediction *= np.array([3.5331593, 1.8774002, 5.07106])\n root_prediction += np.array([-1.6017656, -1.2015667, 2.5101337])\n 
print(rel_prediction.shape)\n print(root_prediction.shape)\n prediction = np.concatenate((root_prediction, rel_prediction), axis=1)\n prediction[:, 1:] += prediction[:, :1]\n\n print(\"prediction success\")\n\n cam = {\n 'orientation': np.array([0.1407056450843811, -0.1500701755285263, -0.755240797996521, 0.6223280429840088], dtype='float32'),\n 'translation': np.array([1841.1070556640625, 4955.28466796875, 1563.4454345703125], dtype='float32'),\n }\n prediction = camera_to_world(prediction.astype(np.float32), R=cam['orientation'], t=cam['translation'])\n end = time.time()\n print(\"Time took to process: \", end - start)\n print(\"Resolution \", \"{width}x{height}\".format(width=test_width, height=test_height))\n print(\"No of frames: \", test_length)\n print(\"FPS: \", test_fps)\n return jsonify({'prediction': prediction.tolist(), 'fps': test_fps }), 200\n\n# example request\n# curl -X POST -H \"Content-Type: application/json\" -d '{\"url\": \"https://raw.githubusercontent.com/Vegetebird/StridedTransformer-Pose3D/main/demo/video/sample_video.mp4\"}' http://localhost:5000/predict\n\nif __name__ == '__main__':\n app.run(debug = True, host='0.0.0.0', port=5000)\n\n\n","repo_name":"AnushkaMadushanka/mocap-hpe","sub_path":"hpe-model-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25159762805","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.autograd import Function\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom scipy.linalg import orth\n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nclass ScribbleLoss(nn.Module):\n\t\"\"\"docstring for ScribbleLoss\"\"\"\n\tdef __init__(self, n_classes):\n\t\tsuper(ScribbleLoss, self).__init__()\n\t\tself.sigmoid = nn.Sigmoid()\n\t\tself.n_classes = n_classes - 1\n\n\tdef forward(self, inputs, trimaps):\n\t\tn_batches = inputs.size()[0]\n\t\tloss = Variable(torch.zeros((n_batches, 1)), requires_grad=True).cuda()\n\t\tfor n in range(n_batches):\n\t\t\ttrimap = trimaps[n]\n\t\t\tmask = torch.zeros(trimap.shape).cuda()\n\t\t\tmask[trimap == 255] = 1\n\n\t\t\t# cprob = self.sigmoid(inputs[n][self.n_classes])\n\t\t\tcprob = inputs[n][self.n_classes]\n\t\t\talpha = cprob * mask\n\t\t\tloss[n] = (mask - alpha).sum() / len(torch.where(mask!=0)[0])\n\t\treturn loss.sum()/n_batches\n\nclass SoftCrossEntropy2d(nn.Module):\n\n\tdef __init__(self, reduction='mean', ignore_label=255):\n\t\tsuper(SoftCrossEntropy2d, self).__init__()\n\t\tself.reduction = reduction\n\t\tself.ignore_label = ignore_label\n\t\tself.log_softmax = nn.LogSoftmax(dim=1)\n\n\t\"\"\"\n\t\tL = \\frac{1}{batch_size} \\sum_{j=1}^{batch_size} \\sum_{i=1}^{N}y_{ji}log(p_{ji})\n\t\tN - total number of pixels\n\t\"\"\"\n\tdef forward(self, predict, target, reduction='mean'):\n\t\t\"\"\"\n\t\t\tArgs:\n\t\t\t\tpredict:(batch, c, h, w)\n\t\t\t\ttarget:(batch, c, h, w)\n\t\t\t\tweight (Tensor, optional): a manual rescaling weight given to each class.\n\t\t\t\t\t\t\t\t\t\t If given, has to be a Tensor of size \"nclasses\"\n\t\t\"\"\"\n\t\tassert not target.requires_grad\n\t\tassert predict.dim() == 4\n\t\tassert predict.size(0) == target.size(0), \"channle: {0} vs {0} \".format(predict.size(0), target.size(0))\n\t\tassert predict.size(1) == target.size(1), \"channle: {1} vs {1} \".format(predict.size(1), target.size(1))\n\t\tassert predict.size(2) == 
target.size(2), \"channle: {2} vs {2} \".format(predict.size(2), target.size(2))\n\t\tassert predict.size(3) == target.size(3), \"channle: {3} vs {3} \".format(predict.size(3), target.size(3))\n\t\tlog_prob = self.log_softmax(predict)\n\t\tlog_prob = log_prob.view(log_prob.size(0), -1)\n\t\tbatch_loss = - torch.sum(target.view(target.size(0), -1) * log_prob, dim=1) / (predict.size(2) * predict.size(3))\n\t\tif reduction == 'none':\n\t\t\treturn batch_loss\n\t\telif reduction == 'mean':\n\t\t\treturn torch.mean(batch_loss)\n\t\telif reduction == 'sum':\n\t\t\treturn torch.sum(batch_loss)\n\t\telse:\n\t\t\traise NotImplementedError('Unkown reduction mode')\n\nclass BinaryDiceLoss(nn.Module):\n\t\"\"\"Dice loss of binary class\n\tArgs:\n\t\tsmooth: A float number to smooth loss, and avoid NaN error, default: 1\n\t\tp: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2\n\t\tpredict: A tensor of shape [N, *]\n\t\ttarget: A tensor of shape same with predict\n\t\treduction: Reduction method to apply, return mean over batch if 'mean',\n\t\t\treturn sum if 'sum', return a tensor of shape [N,] if 'none'\n\tReturns:\n\t\tLoss tensor according to arg reduction\n\tRaise:\n\t\tException if unexpected reduction\n\t\"\"\"\n\tdef __init__(self, smooth=1, p=2, reduction='mean'):\n\t\tsuper(BinaryDiceLoss, self).__init__()\n\t\tself.smooth = smooth\n\t\tself.p = p\n\t\tself.reduction = reduction\n\n\tdef forward(self, predict, target):\n\t\tassert predict.shape[0] == target.shape[0], \"predict & target batch size don't match\"\n\t\tpredict = predict.contiguous().view(predict.shape[0], -1)\n\t\ttarget = target.contiguous().view(target.shape[0], -1)\n\n\t\tnum = torch.sum(torch.mul(predict, target), dim=1) + self.smooth\n\t\tden = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.smooth\n\n\t\tloss = 1 - num / den\n\n\t\tif self.reduction == 'mean':\n\t\t\treturn loss.mean()\n\t\telif self.reduction == 'sum':\n\t\t\treturn loss.sum()\n\t\telif self.reduction == 'none':\n\t\t\treturn loss\n\t\telse:\n\t\t\traise Exception('Unexpected reduction {}'.format(self.reduction))\n\n\nclass DiceLoss(nn.Module):\n\t\"\"\"Dice loss, need one hot encode input\n\tArgs:\n\t\tweight: An array of shape [num_classes,]\n\t\tignore_index: class index to ignore\n\t\tpredict: A tensor of shape [N, C, *]\n\t\ttarget: A tensor of same shape with predict\n\t\tother args pass to BinaryDiceLoss\n\tReturn:\n\t\tsame as BinaryDiceLoss\n\t\"\"\"\n\tdef __init__(self, weight=None, logit=\"sigmoid\", ignore_index=None, **kwargs):\n\t\tsuper(DiceLoss, self).__init__()\n\t\tself.kwargs = kwargs\n\t\tself.weight = weight\n\t\tself.ignore_index = ignore_index\n\t\tself.logit = logit\n\n\n\tdef forward(self, predict, target):\n\t\tassert predict.shape == target.shape, 'predict & target shape do not match'\n\t\tdice = BinaryDiceLoss(**self.kwargs)\n\t\ttotal_loss = 0\n\t\tif self.logit == 'sigmoid':\n\t\t\tpredict = F.sigmoid(predict)\n\t\telse:\n\t\t\tpredict = F.softmax(predict, dim=1)\n\n\t\tfor i in range(target.shape[1]):\n\t\t\tif i != self.ignore_index:\n\t\t\t\tdice_loss = dice(predict[:, i], target[:, i])\n\t\t\t\tif self.weight is not None:\n\t\t\t\t\tassert self.weight.shape[0] == target.shape[1], \\\n\t\t\t\t\t\t'Expect weight shape [{}], get[{}]'.format(target.shape[1], self.weight.shape[0])\n\t\t\t\t\tdice_loss *= self.weights[i]\n\t\t\t\ttotal_loss += dice_loss\n\n\t\treturn total_loss/target.shape[1]\n\n\nif __name__ == '__main__':\n\tce_torch = nn.NLLLoss()\n\tce_own = SoftCrossEntropy2d()\n\tloss = 
nn.MultiLabelSoftMarginLoss()\n\n\tdata_np = np.random.uniform(0, 1, size=(3,2,5,5))\n\ttarget_np = np.random.randint(0, 2, size=(3,1,5,5))\n\ttarget_oh = np.zeros(data_np.shape)\n\n\ttarget_oh[0][0][target_np[0][0] == 0] = 1\n\ttarget_oh[0][1][target_np[0][0] == 1] = 1\n\ttarget_oh[1][0][target_np[1][0] == 0] = 1\n\ttarget_oh[1][1][target_np[1][0] == 1] = 1\n\ttarget_oh[2][0][target_np[2][0] == 0] = 1\n\ttarget_oh[2][1][target_np[2][0] == 1] = 1\n\n\ttarget_torch = torch.from_numpy(target_np).type(torch.LongTensor)\n\ttarget_torch = target_torch.squeeze(1)\n\ttarget_own = torch.from_numpy(target_oh).type(torch.FloatTensor)\n\tdata = torch.from_numpy(data_np).type(torch.FloatTensor)\n\tdata.requires_grad = True\n\n\tlog_data = nn.LogSoftmax(dim=1)(data)\n\toutput_torch = ce_torch(log_data, target_torch)\n\toutput_torch.backward()\n\tprint('torch loss: ', output_torch)\n\n\toutput_multi = loss(data, target_own)\n\tprint('multi loss: ', output_multi)\n\n\tprint(data.size(), target_own.size())\n\toutput_own = ce_own(data, target_own)\n\toutput_own.backward()\n\tprint(\"own loss: \", output_own)\n\n","repo_name":"yaokmallorca/Closed-Form-Loss-for-WSSS","sub_path":"utils/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30086223519","text":"from utils import read_config\nfrom pathlib import Path\nfrom sqlalchemy import create_engine, MetaData, Table, Column, Integer, String\nimport fiona\nfrom shapely.geometry import shape\nimport subprocess\n\nif __name__ == '__main__':\n\n config = read_config('config.ini')\n\n # Create SQLite Database\n if not Path(config['Path']['Database']).is_file():\n \n engine = create_engine('{}:///{}'.format(config['Database']['Engine'], config['Path']['Database']), echo = True)\n meta = MetaData()\n\n products = Table(\n '{}'.format(config['Database']['Table']), meta, \n Column('id', String, primary_key = True), \n Column('title', String), \n Column('filename', String),\n Column('beginposition', String),\n Column('endposition', String),\n Column('orbitnumber', Integer),\n Column('orbitdirection', String),\n Column('footprint', String),\n Column('info_link', String),\n Column('dl_link', String),\n Column('checksum', String),\n Column('downloaded', Integer),\n Column('preprocessed', Integer),\n Column('indexed', Integer),\n Column('detected', Integer),\n sqlite_with_rowid=False\n )\n\n meta.create_all(engine)\n\n\n # Create folders\n app_folders = [\n config['Path']['Temporary'],\n config['Path']['Download'],\n config['Path']['Preprocess'],\n config['Path']['Results']\n ]\n\n for folder_path in app_folders:\n Path(folder_path).mkdir(parents=True, exist_ok=True)\n\n # Check if SHP file exists\n if Path(config['Preprocess']['SHP']).is_file():\n # Create simplified polygon for Scihub API call\n with fiona.open(config['Preprocess']['SHP']) as c:\n polygon = next(iter(c))['geometry']\n polygon_shapely = shape(polygon)\n\n polygon_simplified = str(polygon_shapely.simplify(0.08))\n\n file_api = \"{}_api.txt\".format(config['Preprocess']['SHP'][:-4])\n with open(file_api, 'w') as smpl_fl:\n smpl_fl.write(polygon_simplified)\n else:\n print('File {} does not exist.'.format(config['Preprocess']['SHP']))\n\n \n # Check if trained model exists\n if not Path(config['Predict']['Model']).is_file():\n print('File {} does not exist.'.format(config['Predict']['Model']))\n \n # Add product to datacube\n subprocess.call(\"{}bin/datacube product add 
{}\".format(config['Path']['Venv'], config['Preprocess']['Product_File']), shell=True)","repo_name":"akatsos/ntua-water-detection-system","sub_path":"Code/init_app.py","file_name":"init_app.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12844813865","text":"import time \n\n\nproblem_number = 62\ntest_input = 3\ntest_solution = 41063625 \nproblem_input = 5\n\n\ndef is_permutation(a, b):\n if len(a) == 0 and len(b) == 0:\n return(True)\n elif len(a) != len(b):\n return(False)\n if a[0] in b:\n return(is_permutation(a[1:], b[:b.index(a[0])]+b[b.index(a[0])+1:]))\n else:\n return(False)\n\n\n#Solution\n\n\ndef solution(limit):\n max_permutation = 1\n max_number = 0\n cubic_index = 3\n cubic_exp = 1\n \n while max_permutation < limit and cubic_exp <= 11:\n cubic_exp += 1\n cube = []\n while cubic_index ** 3 < 10**cubic_exp:\n cube.append(cubic_index **3)\n cubic_index += 1\n \n for num in cube:\n number = 0\n for comp in cube:\n if is_permutation(str(num), str(comp)):\n number += 1\n if number > max_permutation:\n max_number = num\n max_permutation = number\n \n return max_number\n \n\n#Test & Result\n\n\nfichier = open(\"Solution \"+str(problem_number)+\".txt\", \"w\")\nstring = \"\"\n\nbegin_test = time.time()\ntest_value = solution(test_input)\nend_test = time.time()\ntest_time = end_test - begin_test\n\nstring += \"TEST #1\\n\\n\"\nstring += \"Input: \"+str(test_input)+\"\\n\"\nstring += \"Output: \"+str(test_value)+\"\\n\"\nstring += \"Answer: \"+str(test_solution)+\"\\n\"\nstring += \"Computation time: \"+str(test_time)+\" sec\\n\"\nstring += \"Verification: \"\n\nif(test_value == test_solution):\n string += \"TRUE\"\nelse:\n string += \"FALSE\"\n \n\nbegin_problem = time.time()\nproblem_value = solution(problem_input)\nend_problem = time.time()\nproblem_time = end_problem - begin_problem\n\nstring += \"\\n\\n\\nRESULT PROBLEM #\"+str(problem_number)+\"\\n\\n\"\nstring += \"Input: \"+str(problem_input)+\"\\n\"\nstring += \"Output: \"+str(problem_value)+\"\\n\"\nstring += \"Computation time: \"+str(problem_time)+\" sec\\n\"\n\nstring += \"\\n\\n\\nCurrent date & time: \" + time.strftime(\"%c\")\n\nfichier.write(string)\nfichier.close()\n","repo_name":"FrancoisdeFouchecour/Projet-Euler","sub_path":"Problems/62-Problem/Problem 62.py","file_name":"Problem 62.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74842748711","text":"import cv2\nfrom darkflow.net.build import TFNet\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nimport json\nimport pika\nimport os.path\nfrom pathlib import Path\nfrom ftplib import FTP\n\noptions = {\n\t'model': 'cfg/yolo-obj.cfg',\n\t'load': 'bin/yolo-obj.weights',\n# 'model': 'cfg/yolo.cfg',\n# 'load': 'bin/yolo.weights',\n 'threshold': 0.15,\n 'gpu': 0.5\n}\n\n#inisiasi pika dan rmq\ncredentials = pika.PlainCredentials('turret.tank030', 'TA171801030')\nparameters = pika.ConnectionParameters('167.205.7.226',5672,'/turret',credentials)\n\nconnection = pika.BlockingConnection(parameters)\nchannel = connection.channel()\n\nchannel.queue_declare(queue='servertoholo', durable=True)\n\nchannel.queue_declare(queue='notif', durable=True)\n\nchannel.exchange_declare(exchange='ex_servertoholo', exchange_type='topic', durable=True)\n\nchannel.queue_bind(queue='servertoholo',\n\t\t exchange='ex_servertoholo',\n\t\t 
routing_key='rk_servertoholo'\n\t\t)\n\t\t\n#capture image\ntfnet = TFNet(options)\n\n#inisialisasi FTP\n#ftp = FTP('167.205.7.226')\n#ftp.login('ftpimageproc|imageproc', 'Img0305@')\n\ndef callback(ch, method, properties, body):\n\tprint(body[0])\n\tprint(\" [x] Received %s\" %body)\n\tif body[0] == 49:\n\t\t#fhandle = open('dataset/tes.jpg', 'wb')\n\t\t#ftp.retrbinary('RETR ' + 'tes.jpg', fhandle.write) \n\t\tprint('Masuk body 1')\n\t\t\n\t\timg = cv2.imread(\"dataset/tes.jpg\", cv2.IMREAD_COLOR)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\t\tresults = tfnet.return_predict(img)\n\t\t\n\t\tprint('Result :', results)\n\t\tprint(' ')\n\n\n\t\t#print('results :',results)\t\n\t\t\n\t\tsum_box = 0\n\t\tfor result in zip(results):\n\t\t\t#print('result :',result)\n\t\t\tsum_box += 1\n\n\t\t#print('sum box :',sum_box)\n\n\t\tif (sum_box == 0) :\n\t\t\tdata = {\n\t\t\t\"sum_box\":\"%d\"%0,\n\t\t\t\"tlx\":\"%d\"%0,\n\t\t\t\"tly\":\"%d\"%0,\n\t\t\t\"brx\":\"%d\"%0,\n\t\t\t\"bry\":\"%d\"%0}\n\t\t\n\t\t\tjsonData = json.dumps(data)\n\t\t\tif(bool(data)):\n\t\t\t\tchannel.basic_publish(exchange='ex_servertoholo', routing_key='rk_servertoholo', body=jsonData)\n\t\t\n\t\telse:\t\n\t\t\tfor result in zip(results):\n\t\t\t\ttl = (result[0]['topleft']['x'], result[0]['topleft']['y'])\n\t\t\t\tbr = (result[0]['bottomright']['x'], result[0]['bottomright']['y'])\n\t\t\t\tlabel = result[0]['label']\n\t\t\t\timg = cv2.rectangle(img, tl, br, (0, 255, 0), 7)\n\t\t\t\timg = cv2.putText(img, label, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)\n\t\t\t\t\n\t\t\t\tdata = {\n\t\t\t\t\"sum_box\":\"%d\"%sum_box,\n\t\t\t\t\"tlx\":\"%d\"%tl[0],\n\t\t\t\t\"tly\":\"%d\"%tl[1],\n\t\t\t\t\"brx\":\"%d\"%br[0],\n\t\t\t\t\"bry\":\"%d\"%br[1]}\n\t\t\n\t\t\t\tjsonData = json.dumps(data)\n\t\t\t\t\n\t\t\t\t#print('tl :',tl)\n\t\t\t\t#print('br :',br)\n\t\t\t\t\n\t\t\tif(bool(data)):\n\t\t\t\tchannel.basic_publish(exchange='ex_servertoholo', routing_key='rk_servertoholo', body=jsonData)\n\t\t\n#\t\ttime.sleep(1)\n\telse :\n\t\t\n\t\ttime.sleep(0.1)\n\t\tprint('Waiting')\n\t\nchannel.basic_consume(callback,\n queue='notif',\n no_ack=True)\nchannel.start_consuming()","repo_name":"naufalinofh/TAfighter","sub_path":"yolo/darkflow-master/tes_image.py","file_name":"tes_image.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34825648438","text":"#!/usr/bin/env python3\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QPushButton, QVBoxLayout, QLabel, QGroupBox, QHBoxLayout\nfrom slider import SliderDisplay\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom msd import MassSpringDamper\nimport math\n\n\ndef pprint(m, s, d, t, ts):\n print('Mass: %s' % m)\n print('Spring: %s' % s)\n print('Damper: %s' % d)\n print('Time(s): %s' % t)\n print('Time Step: %s' % ts)\n print('-'*30)\n\n\nclass Interface(QMainWindow):\n def __init__(self):\n QMainWindow.__init__(self)\n self.TimeBox = QGroupBox()\n self.SysBox = QGroupBox()\n self.BhBox = QGroupBox()\n self.setWindowTitle('Spring-Mass-Damper Playground')\n # A widget to hold everything\n widget = QWidget()\n self.setCentralWidget(widget)\n self.resize(1500, 1000)\n\n # create System Box\n sys_layout = QVBoxLayout()\n self.mass_slider = SliderDisplay('Mass', 0, 10)\n self.spring_slider = SliderDisplay('Spring', 0, 10)\n self.damper_slider = SliderDisplay('Damper', 0, 10)\n # add things to 
layout\n sys_layout.addWidget(self.mass_slider)\n sys_layout.addWidget(self.spring_slider)\n sys_layout.addWidget(self.damper_slider)\n self.SysBox.setLayout(sys_layout)\n\n # create behavior box\n BH_layout = QHBoxLayout()\n text_label = QLabel('Type:')\n self.type = QLabel('')\n # add to layout\n BH_layout.addWidget(text_label)\n BH_layout.addWidget(self.type)\n BH_layout.addStretch(1)\n self.BhBox.setLayout(BH_layout)\n\n # creat Time Box\n layout = QVBoxLayout()\n self.time_slider = SliderDisplay('Time (s)', 0, 100)\n self.step_slider = SliderDisplay('Time step (s)', 0.001, 0.1)\n self.initial = SliderDisplay('Initial x', -10, 0)\n self.dx = SliderDisplay('Initial dx', 0, 0.1)\n # add things to layout\n layout.addWidget(self.time_slider)\n layout.addWidget(self.step_slider)\n layout.addWidget(self.initial)\n layout.addWidget(self.dx)\n self.TimeBox.setLayout(layout)\n\n # set title label\n label1 = QLabel('System parameters')\n label2 = QLabel('Simulation parameters')\n label3 = QLabel('System Behavior')\n\n # simulate button\n ss_button = QPushButton('Simulate\\nSystem')\n ss_button.clicked.connect(\n lambda : pprint(self.mass_slider.value() / 1000, self.spring_slider.value() / 1000,\n self.damper_slider.value() / 1000, self.time_slider.value() / 1000,\n self.step_slider.value() / 1000)\n )\n ss_button.clicked.connect(self.draw)\n ss_button.clicked.connect(self.findType)\n\n # quit button\n quit_button = QPushButton('Quit')\n quit_button.clicked.connect(app.exit)\n\n # The display for the graph\n self.figure = Figure()\n self.display = FigureCanvas(self.figure)\n self.figure.clear()\n\n # main layout\n mainLayout = QHBoxLayout()\n widget.setLayout(mainLayout)\n # left layout\n left_Layout = QVBoxLayout()\n # add things to left layout\n left_Layout.addWidget(label1)\n left_Layout.addWidget(self.SysBox)\n left_Layout.addStretch(1)\n left_Layout.addWidget(label3)\n left_Layout.addWidget(self.BhBox)\n left_Layout.addStretch(1)\n left_Layout.addWidget(label2)\n left_Layout.addWidget(self.TimeBox)\n left_Layout.addStretch(1)\n left_Layout.addWidget(ss_button)\n left_Layout.addStretch(6)\n left_Layout.addWidget(quit_button)\n left_Layout.addStretch(1)\n\n # set main layout\n mainLayout.addLayout(left_Layout, stretch=1)\n mainLayout.addWidget(self.display, stretch=2)\n widget.setLayout(mainLayout)\n\n def draw(self):\n try:\n self.figure.clear()\n k = self.spring_slider.value() / 1000\n m = self.mass_slider.value() / 1000\n c = self.damper_slider.value() / 1000\n time = self.time_slider.value() / 1000\n ts = self.step_slider.value() / 1000\n ini = self.initial.value() / 1000\n dx = self.dx.value() / 1000\n ax = self.figure.add_subplot(111)\n smd = MassSpringDamper(m, k, c, time, ts)\n # start point at (-1,0)\n state, t = smd.simulate(ini, dx)\n self.z = c / (2*math.sqrt(k*m))\n self.displacement = []\n\n for s in state :\n self.displacement.append(s[0])\n ax.plot(t, self.displacement, 'r-')\n ax.set_title('Spring-Mass-Damper System Behavior\\n'\n 'k = %s, m = %s, c = %s, dt = %s' % (k, m, c, ts)\n )\n ax.set_xlabel('Times(s)')\n ax.set_ylabel('Amplitude')\n self.display.draw()\n except:\n self.z = -1\n\n def findType(self):\n if self.z == 0:\n self.type.setText('Undamped')\n elif 0 <= self.z < 1:\n self.type.setText('Underdamped')\n elif self.z > 1:\n self.type.setText('Overdamped')\n elif self.z == 1:\n self.type.setText('Critically damped')\n else:\n self.type.setText('Wrong parameters set up')\n\n\nif __name__ == '__main__' :\n app = QApplication([])\n\n interface = Interface()\n\n 
interface.show()\n\n app.exec_()\n","repo_name":"Wang-LC/MECH_SYS","sub_path":"Lab8&9/gui2.py","file_name":"gui2.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71623605034","text":"from flask import Flask, render_template, request, session, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\ndb=SQLAlchemy()\napp=Flask(__name__)\napp.secret_key='super secret key'\napp.config['SQLALCHEMY_DATABASE_URI']=\"mysql://root:@localhost/blog\"\ndb.init_app(app) # try \"pip install mysqlclient\" if showing : no module named mysqldb\n\nclass Contact(db.Model):\n sno = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String, nullable=False)\n email = db.Column(db.String, nullable=False)\n phone_num = db.Column(db.Integer, nullable=False)\n msg = db.Column(db.String, nullable=False)\n date = db.Column(db.String, nullable=True)\n\n\nclass Posts(db.Model):\n sno = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String, nullable=False)\n slug = db.Column(db.String, nullable=False)\n content = db.Column(db.String, nullable=False)\n date = db.Column(db.String, nullable=True)\n\n@app.route(\"/\")\ndef home():\n posts=Posts.query.filter_by().all()\n return render_template('index.html', posts=posts)\n\n@app.route('/dashboard',methods=['GET','POST'])\ndef dashboard():\n if ('user' in session and session['user']=='bablu_926'):\n posts=Posts.query.filter_by().all()\n return render_template('dashboard.html', posts=posts)\n \n if (request.method=='POST'):\n user_name=request.form.get('uname')\n user_password=request.form.get('pass')\n if (user_name=='bablu_926' and user_password=='bablu@926'):\n session['user']= user_name\n posts=Posts.query.filter_by().all()\n return render_template('dashboard.html', posts=posts)\n \n return render_template('login.html')\n\n@app.route(\"/logout\")\ndef logout():\n session.pop('user')\n return redirect('/dashboard')\n\n@app.route(\"/post/\", methods=['GET'])\ndef post_route(post_slug):\n post=Posts.query.filter_by(slug=post_slug).first()\n return render_template('post.html',post=post,slug=post_slug)\n\n@app.route(\"/add_post/\", methods=['GET','POST'])\ndef add_post(sno):\n if (request.method==\"POST\"):\n if sno=='0': \n box_title=request.form.get('title')\n slug=request.form.get('slug')\n content=request.form.get('content')\n date=datetime.now()\n post=Posts(title=box_title, slug=slug,content=content,date=date)\n db.session.add(post)\n db.session.commit()\n post=Posts.query.filter_by(sno=sno).first\n return render_template('add_post.html',post=post,sno=sno)\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html')\n\n@app.route(\"/edit/\", methods=['GET','POST'])\ndef edit(sno):\n if ('user' in session and session['user']=='bablu_926'):\n if (request.method==\"POST\"):\n box_title=request.form.get('title')\n slug=request.form.get('slug')\n content=request.form.get('content')\n date=datetime.now()\n \n if (sno=='0'):\n post=Posts(title=box_title, slug=slug,content=content,sno=sno,date=date)\n db.session.add(post)\n db.session.commit()\n else:\n post=Posts.query.filter_by(sno=sno).first()\n post.title=box_title\n post.slug=slug\n post.content=content\n post.date=date\n db.session.commit()\n return redirect('/edit/'+sno)\n post=Posts.query.filter_by(sno=sno).first()\n return render_template('edit.html',post=post,sno=sno)\n \n@app.route(\"/delete/\", methods=['GET','POST'])\ndef delete(sno):\n if 
('user' in session and session['user']=='bablu_926'):\n post=Posts.query.filter_by(sno=sno).first()\n db.session.delete(post)\n db.session.commit()\n return redirect('/dashboard')\n\n@app.route(\"/contact\", methods=['GET','POST'])\ndef contact():\n if(request.method=='POST'):\n name=request.form.get('name')\n email=request.form.get('email')\n phone=request.form.get('phone')\n message=request.form.get('message')\n\n entry=Contact(name=name, email=email, phone_num=phone, msg=message,date=datetime.now())\n db.session.add(entry)\n db.session.commit()\n\n return render_template('contact.html')\n\n\napp.run(debug=True)","repo_name":"Babluchauhan/Blogger-Website","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40587762844","text":"\"\"\"URL paths for Organizer App.\"\"\"\nfrom django.urls import path\n\nfrom organizer.views import (\n NewsLinkAPIDetail,\n StartupAPIDetail,\n StartupAPIList,\n TagAPIDetail,\n TagAPIList\n)\n\nurlpatterns = [\n path('startup/', StartupAPIList.as_view(),\n name='startup_list'),\n path('startup//',\n StartupAPIDetail.as_view(), name='api-startup-detail'),\n path('startup///',\n NewsLinkAPIDetail.as_view(), name='api-newslink-detail'),\n path('tag/', TagAPIList.as_view(),\n name='tag_list'),\n path('tag//',\n TagAPIDetail.as_view(), name='api-tag-detail'),\n]\n","repo_name":"moswil/Django-and-DRF-from-scratch","sub_path":"src/organizer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22025127421","text":"import time\nimport vispy.plot as vp\nimport vispy.io as io\nimport numpy as np\n\nimport threading\nimport concurrent.futures as cc\nimport multiprocessing\nimport os\nimport uuid\nimport queue\n\nclass RenderTask:\n def __init__(self, task_f, *args):\n self.id = str(uuid.uuid4())\n self.task_f = task_f\n self.args = args\n self.done = False\n \n def render(self):\n img = self.task_f(*(self.args))\n self.done = True\n return img\n\n\nclass PlotRenderer:\n\n def __init__(self):\n self.n_workers = multiprocessing.cpu_count() - 1\n self.n_tasks = 10\n self.logger = multiprocessing.get_logger()\n self.proc = os.getpid()\n self.queue_in = queue.Queue()\n self.queue_out = queue.Queue()\n\n self.shutdown = False\n\n def push(self, task_f, *args):\n task = RenderTask(task_f, *args)\n self.queue_in.put(task)\n\n \n def try_pop(self):\n return self.queue_out.get()\n \n def executor(self):\n with cc.ThreadPoolExecutor(max_workers=self.n_workers) as executor:\n while not self.shutdown:\n task = self.queue_in.get(block=True)\n # print(f'Received a task {task.id}')\n future = executor.submit(task.render())\n self.queue_out.put(future)\n\nclass Consumer:\n def __init__(self, renderer):\n self.renderer = renderer\n\n def consume(self):\n start = time.time_ns()\n count = 0\n while True:\n count += 1\n renderer.try_pop()\n\n if (time.time_ns() - start) > 1e9:\n print(f'Consumer: {count} {renderer.queue_in.qsize()}', flush=True)\n count = 0\n start = time.time_ns()\n\nif __name__ == '__main__': \n\n renderer = PlotRenderer()\n consumer = Consumer(renderer)\n render_executor = threading.Thread(target=renderer.executor)\n consumer_executor = threading.Thread(target=consumer.consume) \n\n render_executor.start()\n consumer_executor.start()\n\n fig = vp.Fig()\n ax_left = fig[0, 0]\n ax_right = fig[0, 1]\n\n data = 
np.random.randn(10, 2)\n ax_left.plot(data)\n ax_right.histogram(data[1])\n\n start = time.time_ns()\n count = 0\n while True:\n count += 1\n\n image = fig.render()\n # io.imsave(\"wonderful.png\",image)\n io.write_png(\"wonderful.png\", image)\n # renderer.push(io._make_png, image, 0)\n # io._make_png(image, level=0)\n\n if (time.time_ns() - start) > 1e9:\n print(f'Producer: {count} {renderer.queue_in.qsize()}', flush=True)\n count = 0\n start = time.time_ns()","repo_name":"EricVoll/ARbotics","sub_path":"Python/debug/vispy_test.py","file_name":"vispy_test.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"2439040960","text":"from sklearn.compose import ColumnTransformer\r\nimport numpy as np\r\nimport pandas as pd\r\nimport typing\r\nfrom typing import Sequence\r\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\r\nfrom sklearn.model_selection import StratifiedKFold, cross_validate\r\nfrom sklearn.pipeline import make_pipeline\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef binary_to_labeled(value: int) -> str:\r\n '''Changes binary features to \"Yes\" (1) and \"No\" (0)'''\r\n if value == 0:\r\n return 'No'\r\n \r\n return 'Yes'\r\n\r\n\r\ndef multiple_countplots(df: pd.DataFrame, columns: Sequence[str], target_feature: str):\r\n '''\r\n Given a pandas DataFrame, list of columns labels and target feature \r\n plots countlpots for all given columns with hue set as target feature.\r\n '''\r\n fig, ax = plt.subplots(1 * ((len(columns)-1) // 3 + 1), 3, figsize=(12, 4 * ((len(columns)-1) // 3 + 1)))\r\n\r\n axs = ax.flatten()\r\n\r\n for i, col in enumerate(columns):\r\n counts = df.groupby([col, target_feature]).size()\r\n\r\n levels = [counts.index.levels[0], counts.index.levels[1]]\r\n new_index = pd.MultiIndex.from_product(levels, names=counts.index.names)\r\n\r\n counts = counts.reindex(new_index, fill_value=0)\r\n\r\n values = df[col].unique()\r\n bottom = [counts[value, 0] for value in values]\r\n top = [counts[value, 1] for value in values]\r\n\r\n axs[i].bar(values, bottom)\r\n axs[i].bar(values, top, bottom=bottom)\r\n \r\n for container in axs[i].containers:\r\n labels = [value.get_height() if value.get_height() > 0 else '' for value in container]\r\n \r\n axs[i].bar_label(container, labels=labels, label_type='center')\r\n\r\n for label in axs[i].get_xticklabels():\r\n label.set_rotation(30)\r\n\r\n axs[i].set_xlabel(col)\r\n\r\n plt.tight_layout()\r\n\r\n\r\ndef categorize_age(age: float) -> str:\r\n '''\r\n Categorizes age value into one of these categories:\r\n - child\r\n - adult\r\n - elder\r\n '''\r\n if age < 18:\r\n return 'child'\r\n elif age < 65:\r\n return 'adult'\r\n \r\n return 'elder'\r\n\r\n\r\ndef categorize_bmi(bmi: float) -> str:\r\n '''\r\n Categorizes BMI value into one of these categories:\r\n - underweight\r\n - normal\r\n - overweight\r\n - obese\r\n - extremely obese\r\n '''\r\n if bmi < 18.5:\r\n return 'underweight'\r\n elif bmi < 25:\r\n return 'normal'\r\n elif bmi < 30:\r\n return 'overweight'\r\n elif bmi < 35:\r\n return 'obese'\r\n \r\n return 'extremely obese'\r\n\r\n\r\ndef categorize_glucose(avg_glucose_level: float) -> str:\r\n '''\r\n Categorizes glucose value into one of these categories:\r\n - low\r\n - normal\r\n - prediabetes\r\n - diabetes\r\n '''\r\n if avg_glucose_level < 70:\r\n return 'low'\r\n elif avg_glucose_level < 100:\r\n return 'normal'\r\n elif avg_glucose_level < 125:\r\n return 
'prediabetes'\r\n \r\n return 'diabetes'\r\n\r\n\r\ndef xy_split(df: pd.DataFrame, target_feature: str) -> tuple[pd.DataFrame]:\r\n '''\r\n Cuts off a target column of the DataFrame\r\n \r\n Returns two DataFrames:\r\n train_X and train_y\r\n '''\r\n return df.drop(columns=target_feature), df[target_feature]\r\n\r\n\r\ndef get_model_name(variable: object) -> str:\r\n '''Returns model name given a model'''\r\n return str(variable).split('(')[0]\r\n\r\n\r\ndef brute_force_models(train_X: pd.DataFrame, train_y: pd.Series, categorical_columns: Sequence[str], numerical_columns: Sequence[str], scalers: Sequence[object], encoders: Sequence[object], models: Sequence[object]) -> pd.DataFrame:\r\n '''\r\n This function tries every given combination of encoders, scalers and models.\r\n\r\n It needs to be provided with:\r\n train_X - training dataset\r\n train_y - target dataset\r\n categorical_columns - list of labels of categorical columns in train_X\r\n numerical_columns - list of labels of numerical columns in train_X\r\n scalers - list of scalers\r\n encoders - list of encoder\r\n models - list of models\r\n\r\n Returns a pandas DataFrame with all combinations and their accuracy, recall, precision and f1 score\r\n '''\r\n df = pd.DataFrame(columns=['encoder', 'scaler', 'model', 'accuracy', 'recall', 'precision', 'f1_score'])\r\n \r\n for encoder in encoders:\r\n for scaler in scalers:\r\n transformer = ColumnTransformer([('categorical', encoder, categorical_columns), ('numerical', scaler, numerical_columns)])\r\n for model in models:\r\n pipe = make_pipeline(transformer, model)\r\n \r\n pipe.fit(train_X, train_y)\r\n\r\n scores = cross_validate(pipe, train_X, train_y, scoring=['accuracy', 'recall', 'precision', 'f1'], cv=StratifiedKFold(shuffle=True, n_splits=5, random_state=42))\r\n\r\n accuracy = np.mean(scores['test_accuracy'])\r\n recall = np.mean(scores['test_recall'])\r\n precision = np.mean(scores['test_precision'])\r\n f1 = np.mean(scores['test_f1'])\r\n\r\n list = [get_model_name(encoder), get_model_name(scaler), get_model_name(model), accuracy, recall, precision, f1]\r\n\r\n df.loc[len(df)] = list\r\n\r\n return df\r\n\r\n\r\ndef adjust_class(pred: Sequence[float], t: float) -> list[int]:\r\n '''Given a list of probabilities and a threshold, returns a list with binary classes'''\r\n return [1 if y >= t else 0 for y in pred]\r\n\r\n\r\ndef model_report(y_test: Sequence[float], y_pred: Sequence[float]):\r\n '''Given y_test and y_pred provides accuracy score, confusiom matrix and classification report'''\r\n print('Accuracy:\\n\\t', accuracy_score(y_test, y_pred), '\\n')\r\n print('Confusion matrix:\\n', confusion_matrix(y_test, y_pred), '\\n')\r\n print('Classification report:\\n', classification_report(y_test, y_pred), '\\n')\r\n\r\n\r\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\r\n '''\r\n Plots precision recall curve\r\n '''\r\n plt.figure(figsize=(8, 8))\r\n plt.title(\"Precision and Recall Scores as a function of the decision threshold\")\r\n plt.plot(thresholds, precisions[:-1], \"b--\", linewidth=2, label=\"Precision\")\r\n plt.plot(thresholds, recalls[:-1], \"g-\", linewidth=2, label=\"Recall\")\r\n plt.ylabel(\"Score\")\r\n plt.xlabel(\"Decision Threshold\")\r\n plt.legend(loc='best')\r\n\r\n\r\ndef plot_roc_curve(fpr, tpr, label):\r\n '''\r\n Plots ROC curve given False Positive Rate and True Positive Rate\r\n '''\r\n gmeans = np.sqrt(tpr * (1-fpr))\r\n ix = np.argmax(gmeans)\r\n\r\n plt.plot(fpr, tpr, linewidth=2, label=label)\r\n plt.plot([0, 1], 
[0, 1], 'k--')\r\n plt.scatter(fpr[ix], tpr[ix], marker='o', color='black', label='Best')\r\n\r\n plt.title('ROC Curve')\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.legend()\r\n\r\n plt.show()","repo_name":"sugartitan/turing-ml2","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":6843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25134267079","text":"# You cannot add items to a tuple:\r\n# thistuple = (\"apple\", \"banana\", \"cherry\")\r\n# thistuple[3] = \"orange\" # This will raise an error\r\n# print(thistuple)\r\n\r\ntuple1 = (\"sagar\",\"meet\",\"jt\")\r\nprint(tuple1)\r\n\r\nlist1 = list(tuple1)\r\nlist1[1]=\"jay\"\r\ntuple1 = tuple(list1)\r\nprint(tuple1)","repo_name":"SS1908/30_days_of_python","sub_path":"Day 5/Change_value_of_tuple.py","file_name":"Change_value_of_tuple.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73146925993","text":"#86593385\nfrom typing import List, Tuple\n\n\ndef get_max_points(matrix: List[str], k: int) -> int:\n points = 0\n count = [0] * 16\n dot = '.'\n for number in matrix:\n if number != dot:\n count[int(number)] += 1\n for number in count:\n if 0 < number <= k * 2:\n points += 1\n return points\n\n\ndef read_input() -> Tuple[List[str], int]:\n k = int(input())\n matrix = [number for number in range(4) for number in input().strip()]\n return matrix, k\n\n\ndef main():\n matrix, k = read_input()\n print(get_max_points(matrix, k))\n\n\nif __name__ == '__main__':\n main()","repo_name":"SergeyKataev1/YP_algorithms","sub_path":"sleight_of_hand.py","file_name":"sleight_of_hand.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73285129833","text":"\"\"\"\nClass that implements the common functionality for the format of templates\n\"\"\"\nfrom optparse import OptionGroup\n\nfrom PyFoam import configuration as config\n\nclass CommonTemplateFormat(object):\n \"\"\" The class that defines options for template formats\n \"\"\"\n\n def addOptions(self):\n tformat=OptionGroup(self.parser,\n \"Format\",\n \"Specifying details about the format of the pyratemp-templates (new format)\")\n self.parser.add_option_group(tformat)\n tformat.add_option(\"--expression-delimiter\",\n action=\"store\",\n default=config().get(\"Template\",\"expressionDelimiter\"),\n dest=\"expressionDelimiter\",\n help=\"String that delimits an expression. At the end of the expression the reverse string is being used. Default: %default\")\n tformat.add_option(\"--assignment-line-start\",\n action=\"store\",\n default=config().get(\"Template\",\"assignmentLineStart\"),\n dest=\"assignmentLineStart\",\n help=\"String at the start of a line that signifies that this is an assignment. 
Default: %default\")\n","repo_name":"nextfoam/baram","sub_path":"PyFoam/Applications/CommonTemplateFormat.py","file_name":"CommonTemplateFormat.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"15171739009","text":"mod = 10**9+7\r\n\r\n\r\nH, W, A, B = map(int, input().split())\r\n\r\n\r\nclass Fact():\r\n def __init__(self, H, W):\r\n self.mod = 10**9+7\r\n self.facts = [1]*(H+W-1)\r\n for i in range(1, H+W-1):\r\n self.facts[i] = self.facts[i-1]*i % mod\r\n\r\n self.i = [pow(self.facts[-1], mod-2, mod)]\r\n for i in range(1, H+W-1)[::-1]:\r\n self.i.append(self.i[-1]*i % mod)\r\n self.i.reverse()\r\n\r\n def comb(self, n, r):\r\n ret = self.facts[n] * self.i[n-r]*self.i[r]\r\n return ret\r\n\r\n def print_fact(self):\r\n print(self.facts)\r\n\r\n\r\nf = Fact(H, W)\r\n\r\nall_path = f.comb(H+W-2, H-1)\r\n\r\nremove_path = 0\r\nfor i in range(B):\r\n tmp = f.comb(H-A+i-1, i)\r\n tmp *= f.comb(W-i-1+A-1, A-1)\r\n remove_path += tmp\r\n\r\nremove_path %= mod\r\n\r\nprint((all_path - remove_path) % mod)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc058/B/4088548.py","file_name":"4088548.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"1503461650","text":"import json\n# 总提交次数前一半的斜率\n# 存储题目被提交次数\ntimes = [0] * 3000\n# 记录参与人总体分数变动值\nscores = [0] * 3000\n\n\ndef jsonRead(file_path):\n f = open(file_path, 'r', encoding='utf-8')\n data = json.load(f)\n for each in data:\n eachRecord = data[each]\n cases = eachRecord['cases']\n for eachCase in cases:\n uploads = eachCase['upload_records']\n case_id = eachCase['case_id']\n half_len = len(uploads) // 2 + 1 if len(uploads) % 2 else len(uploads) // 2\n # 改为一半提交的最大得分\n half_slope = max(uploads[0:half_len], key=lambda x: x['score'])['score'] / half_len if len(\n uploads) != 0 else 0\n times[int(case_id)] += 1\n scores[int(case_id)] += float(half_slope)\n f.close()\n\n\ndef main_thread(file_path):\n jsonRead(file_path)\n dictHalfSlope = {}\n for i in range(0, 3000):\n if (times[i] != 0):\n dictHalfSlope[str(i)] = float(scores[i] / times[i])\n return dictHalfSlope\n","repo_name":"Analysismen/Student-Coding-Ability","sub_path":"component/HalfSlope.py","file_name":"HalfSlope.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41104624918","text":"import pulp\nimport numpy as np\nimport pandas as pd\nimport sys\nimport os\nsys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + \"/../..\")\n\nfor p in sys.path:\n print(p)\n\nfrom src.data.download import download_from_github\n\n\n\ndef select_team(\n expected_scores, prices, positions, clubs, total_budget=100, sub_factor=0.2\n):\n num_players = len(expected_scores)\n model = pulp.LpProblem(\"Constrained value maximisation\", pulp.LpMaximize)\n decisions = [\n pulp.LpVariable(\"x{}\".format(i), lowBound=0, upBound=1, cat=\"Integer\")\n for i in range(num_players)\n ]\n captain_decisions = [\n pulp.LpVariable(\"y{}\".format(i), lowBound=0, upBound=1, cat=\"Integer\")\n for i in range(num_players)\n ]\n sub_decisions = [\n pulp.LpVariable(\"z{}\".format(i), lowBound=0, upBound=1, cat=\"Integer\")\n for i in range(num_players)\n ]\n\n # objective function:\n model += (\n sum(\n (captain_decisions[i] + decisions[i] + sub_decisions[i] * sub_factor)\n * 
expected_scores[i]\n for i in range(num_players)\n ),\n \"Objective\",\n )\n\n # cost constraint\n model += (\n sum((decisions[i] + sub_decisions[i]) * prices[i] for i in range(num_players))\n <= total_budget\n ) # total cost\n\n # position constraints\n # 1 starting goalkeeper\n model += sum(decisions[i] for i in range(num_players) if positions[i] == \"GK\") == 1\n # 2 total goalkeepers\n model += (\n sum(\n decisions[i] + sub_decisions[i]\n for i in range(num_players)\n if positions[i] == \"GK\"\n )\n == 2\n )\n\n # 3-5 starting defenders\n model += sum(decisions[i] for i in range(num_players) if positions[i] == \"DEF\") >= 3\n model += sum(decisions[i] for i in range(num_players) if positions[i] == \"DEF\") <= 5\n # 5 total defenders\n model += (\n sum(\n decisions[i] + sub_decisions[i]\n for i in range(num_players)\n if positions[i] == \"DEF\"\n )\n == 5\n )\n\n # 3-5 starting midfielders\n model += sum(decisions[i] for i in range(num_players) if positions[i] == \"MID\") >= 3\n model += sum(decisions[i] for i in range(num_players) if positions[i] == \"MID\") <= 5\n # 5 total midfielders\n model += (\n sum(\n decisions[i] + sub_decisions[i]\n for i in range(num_players)\n if positions[i] == \"MID\"\n )\n == 5\n )\n\n # 1-3 starting attackers\n model += sum(decisions[i] for i in range(num_players) if positions[i] == \"FWD\") >= 1\n model += sum(decisions[i] for i in range(num_players) if positions[i] == \"FWD\") <= 3\n # 3 total attackers\n model += (\n sum(\n decisions[i] + sub_decisions[i]\n for i in range(num_players)\n if positions[i] == \"FWD\"\n )\n == 3\n )\n\n # club constraint\n for club_id in np.unique(clubs):\n model += (\n sum(\n decisions[i] + sub_decisions[i]\n for i in range(num_players)\n if clubs[i] == club_id\n )\n <= 3\n ) # max 3 players\n\n model += sum(decisions) == 11 # total team size\n model += sum(captain_decisions) == 1 # 1 captain\n\n for i in range(num_players):\n model += (\n decisions[i] - captain_decisions[i]\n ) >= 0 # captain must also be on team\n model += (decisions[i] + sub_decisions[i]) <= 1 # subs must not be on team\n\n model.solve()\n print(\"Total expected score = {}\".format(model.objective.value()))\n decisions_bin = [int(d.value()) for d in decisions]\n captain_decisions_bin = [int(d.value()) for d in captain_decisions]\n sub_decisions_bin = [int(d.value()) for d in sub_decisions]\n\n return decisions_bin, captain_decisions_bin, sub_decisions_bin\n\n\ndef main():\n df = download_from_github(season=\"2023-24\", gw=8)\n df_reduced = df.loc[:, [\"name\", \"position\", \"team\", \"total_points\", \"value\"]]\n decisions_bin, captain_decisions_bin, sub_decisions_bin = select_team(\n expected_scores=df_reduced[\"total_points\"],\n prices=df_reduced[\"total_points\"],\n positions=df_reduced[\"position\"],\n clubs=df_reduced[\"team\"],\n total_budget=1000,\n sub_factor=0.0,\n )\n df_reduced[\"selected\"] = decisions_bin\n df_reduced[\"captain\"] = captain_decisions_bin\n df_reduced[\"substitute\"] = sub_decisions_bin\n\n df_squad = df_reduced.loc[\n (df_reduced[\"selected\"] == 1) | (df_reduced[\"substitute\"] == 1), :\n ]\n df_squad[\"actual_points\"] = np.where(\n df_squad.loc[:, \"substitute\"] == 1,\n 0,\n np.where(\n df_squad.loc[:, \"captain\"] == 1,\n df_squad.loc[:, \"total_points\"] * 2,\n df_squad.loc[:, \"total_points\"],\n ),\n )\n print(df_squad)\n print(df_squad.loc[:, [\"value\", \"actual_points\"]].sum())\n return None\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"RACollins/fpl-perfect-gw","sub_path":"src/opt/perfect_gw_lp.py","file_name":"perfect_gw_lp.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12896580225","text":"from flask import Blueprint, request, jsonify\nfrom .actions import DeviceController\nfrom .models import DeviceInfo\n\"\"\"\nDevice Control API Endpoints\n\nThis module defines a set of routes that serve as the API endpoints for system control operations.\nIt enables clients to interact with devices by providing functionalities such as pausing,\nresuming, and changing the rate of a device. Each route corresponds to a specific action\nand requires certain parameters, like device ID, port, and IP address, to execute the action.\n\nThe module leverages a `DeviceController` class from the `actions` module to perform these\noperations and uses the `DeviceInfo` class from the `models` module to structure the data.\n\nFuture will be to add the capability to control other part of the system, \nand supervise the condition of the system.\n\nRoutes:\n- `/test`: Tests the connectivity to the API.\n- `/pause`: Pause a specific device.\n- `/resume`: Resume a paused device.\n- `/change_rate`: Change the rate of a device.\n\n\"\"\"\ncontrol_blueprint = Blueprint('control', __name__, url_prefix=\"/api/control\")\n\ndevice_controller = DeviceController()\n\n\n@control_blueprint.route('/test')\ndef hello():\n \"\"\"\n Tests the connectivity to the API.\n\n :return: A message confirming successful connection.\n :doc-author: Yukkei\n \"\"\"\n return '
 Connection Success 
'\n\n\n@control_blueprint.route(\"/pause\", methods=['POST'])\ndef pause_device():\n \"\"\"\n Pause a device given its ID, port, and IP address.\n\n :return: The result of the pause action and a corresponding status code.\n :doc-author: Yukkei\n \"\"\"\n device_id = request.json.get('device_id')\n device_port = request.json.get('port')\n device_ip = request.json.get('ip_address')\n device_info = DeviceInfo(device_id, device_ip, device_port)\n\n result, status_code = device_controller.pause(device_info)\n return result, status_code\n\n\n@control_blueprint.route(\"/resume\", methods=['POST'])\ndef resume_device():\n \"\"\"\n Resume a paused device given its ID, port, and IP address.\n\n :return: The result of the resume action and a corresponding status code.\n :doc-author: Yukkei\n \"\"\"\n device_id = request.json.get('device_id')\n device_port = request.json.get('port')\n device_ip = request.json.get('ip_address')\n device_info = DeviceInfo(device_id, device_ip, device_port)\n\n result, status_code = device_controller.resume(device_info)\n return result, status_code\n\n\n@control_blueprint.route(\"/change_rate\", methods=['POST'])\ndef change_rate():\n \"\"\"\n Change the rate of a device given its ID, port, IP address, and the desired rate.\n\n :return: The result of the rate change action and a corresponding status code.\n :doc-author: Yukkei\n \"\"\"\n device_id = request.json.get('device_id')\n device_port = request.json.get('port')\n device_ip = request.json.get('ip_address')\n device_info = DeviceInfo(device_id, device_ip, device_port)\n\n rate = request.json.get('rate')\n result, status_code = device_controller.change_rate(device_info, rate)\n return result, status_code\n","repo_name":"yuk-kei/iot-platform-backend","sub_path":"supervisor-service/application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15209789059","text":"#!/usr/bin/python\n\nimport sys\n\nINF = 1000000\n\ndef main():\n\tn=int(raw_input().strip())\n\tfor i in range(n):\n\t\t[h,w]=[int(x) for x in raw_input().strip().split()]\n\t\tm=[[INF]*(w+2)]+[[INF]+[int(x) for x in raw_input().strip().split()]+[INF] for _ in range(h)]+[[INF]*(w+2)]\n\t\tgr={}\n\t\tss=[]\n\t\tfor r in range(h):\n\t\t\tfor c in range(w):\n\t\t\t\tbr=r\n\t\t\t\tbc=c\n\t\t\t\tfor (dr,dc) in [(r-1,c),(r,c-1),(r,c+1),(r+1,c)]:\n\t\t\t\t\tif m[br+1][bc+1]>m[dr+1][dc+1]:\n\t\t\t\t\t\tbr=dr\n\t\t\t\t\t\tbc=dc\n\t\t\t\tif br==r and bc==c:\n\t\t\t\t\tss.append((r,c))\n\t\t\t\telse:\n\t\t\t\t\tgr.setdefault((br,bc),[]).append((r,c))\n\t\tsm={}\n\t\tdef dosm(s,v):\n\t\t\tsm[v]=s\n\t\t\tfor vv in gr.setdefault(v,[]):\n\t\t\t\tdosm(s,vv)\n\t\tfor s in ss:\n\t\t\tdosm(s,s)\n\t\tnl='a'\n\t\tsmm={}\n\t\tsys.stdout.write(\"Case #%d:\\n\" % (i+1))\n\t\tfor r in range(h):\n\t\t\tfor c in range(w):\n\t\t\t\ts=sm[(r,c)]\n\t\t\t\tif s not in smm:\n\t\t\t\t\tsmm[s]=nl\n\t\t\t\t\tnl=chr(ord(nl)+1)\n\t\t\t\tsys.stdout.write(smm[s])\n\t\t\t\tif c